From 97ea6cb319397dd87f638040781f6b284dc4ad68 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 6 May 2025 14:20:26 +0000
Subject: [PATCH] build(deps): bump github.com/open-policy-agent/opa from
 0.70.0 to 1.4.2

Bumps [github.com/open-policy-agent/opa](https://github.com/open-policy-agent/opa) from 0.70.0 to 1.4.2.
- [Release notes](https://github.com/open-policy-agent/opa/releases)
- [Changelog](https://github.com/open-policy-agent/opa/blob/main/CHANGELOG.md)
- [Commits](https://github.com/open-policy-agent/opa/compare/v0.70.0...v1.4.2)

---
updated-dependencies:
- dependency-name: github.com/open-policy-agent/opa
  dependency-version: 1.4.2
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot]
---
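Reviewer note: this is a semver-major update. As the vendor renames below show (opa/{ => v1}/...), OPA 1.x moves its public Go packages under github.com/open-policy-agent/opa/v1/ and keeps the old import paths as slimmed-down deprecated wrappers, which is why this bump vendors cleanly without source changes. A minimal sketch of the import-path migration call sites would eventually make; the policy, query, and input below are hypothetical, not code from this repository:

    package main

    import (
        "context"
        "fmt"

        // The pre-1.0 path "github.com/open-policy-agent/opa/rego" still
        // compiles against this vendor tree (it is now a thin wrapper),
        // but the v1 packages are the maintained API surface.
        "github.com/open-policy-agent/opa/v1/rego"
    )

    func main() {
        // OPA 1.x defaults to Rego v1 syntax: rule bodies take `if`,
        // with no `import rego.v1` required.
        r := rego.New(
            rego.Query("data.example.allow"),
            rego.Module("example.rego", `package example

    allow if input.user == "admin"`),
            rego.Input(map[string]any{"user": "admin"}),
        )
        rs, err := r.Eval(context.Background())
        if err != nil {
            panic(err)
        }
        fmt.Println(rs.Allowed()) // true
    }

Policies written in pre-1.0 syntax should keep working through the deprecated top-level packages, which preserve the old parsing defaults, so the Go-side migration can land separately from any Rego-side cleanup.
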
 go.mod | 56 +-
 go.sum | 127 +-
 vendor/github.com/OneOfOne/xxhash/.gitignore | 4 -
 vendor/github.com/OneOfOne/xxhash/.travis.yml | 13 -
 vendor/github.com/OneOfOne/xxhash/README.md | 74 -
 vendor/github.com/OneOfOne/xxhash/xxhash.go | 294 -
 .../github.com/OneOfOne/xxhash/xxhash_go17.go | 161 -
 .../github.com/OneOfOne/xxhash/xxhash_safe.go | 183 -
 .../OneOfOne/xxhash/xxhash_unsafe.go | 240 -
 .../cpuguy83/go-md2man/v2/md2man/roff.go | 11 +-
 .../github.com/fsnotify/fsnotify/.cirrus.yml | 7 +-
 .../fsnotify/fsnotify/.gitattributes | 1 -
 .../github.com/fsnotify/fsnotify/.gitignore | 3 +
 .../github.com/fsnotify/fsnotify/CHANGELOG.md | 34 +-
 .../fsnotify/fsnotify/CONTRIBUTING.md | 120 +-
 .../fsnotify/fsnotify/backend_fen.go | 324 +-
 .../fsnotify/fsnotify/backend_inotify.go | 594 +-
 .../fsnotify/fsnotify/backend_kqueue.go | 747 +-
 .../fsnotify/fsnotify/backend_other.go | 204 +-
 .../fsnotify/fsnotify/backend_windows.go | 305 +-
 .../github.com/fsnotify/fsnotify/fsnotify.go | 368 +-
 .../fsnotify/fsnotify/internal/darwin.go | 39 +
 .../fsnotify/internal/debug_darwin.go | 57 +
 .../fsnotify/internal/debug_dragonfly.go | 33 +
 .../fsnotify/internal/debug_freebsd.go | 42 +
 .../fsnotify/internal/debug_kqueue.go | 32 +
 .../fsnotify/fsnotify/internal/debug_linux.go | 56 +
 .../fsnotify/internal/debug_netbsd.go | 25 +
 .../fsnotify/internal/debug_openbsd.go | 28 +
 .../fsnotify/internal/debug_solaris.go | 45 +
 .../fsnotify/internal/debug_windows.go | 40 +
 .../fsnotify/fsnotify/internal/freebsd.go | 31 +
 .../fsnotify/fsnotify/internal/internal.go | 2 +
 .../fsnotify/fsnotify/internal/unix.go | 31 +
 .../fsnotify/fsnotify/internal/unix2.go | 7 +
 .../fsnotify/fsnotify/internal/windows.go | 41 +
 vendor/github.com/fsnotify/fsnotify/mkdoc.zsh | 259 -
 .../fsnotify/fsnotify/system_bsd.go | 1 -
 .../fsnotify/fsnotify/system_darwin.go | 1 -
 .../options/annotations.pb.go | 119 +-
 .../options/annotations.proto | 7 +
 .../options/annotations.swagger.json | 44 -
 .../options/annotations_protoopaque.pb.go | 269 +
 .../protoc-gen-openapiv2/options/buf.gen.yaml | 7 +
 .../options/openapiv2.pb.go | 2413 +++++--
 .../options/openapiv2.proto | 39 +
 .../options/openapiv2.swagger.json | 44 -
 .../options/openapiv2_protoopaque.pb.go | 4055 +++++++++++
 .../grpc-gateway/v2/runtime/handler.go | 2 +-
 .../grpc-gateway/v2/runtime/mux.go | 8 +
 .../grpc-gateway/v2/runtime/query.go | 16 +-
 .../open-policy-agent/opa/ast/annotations.go | 962 +--
 .../open-policy-agent/opa/ast/builtins.go | 3173 +--------
 .../open-policy-agent/opa/ast/capabilities.go | 200 +-
 .../open-policy-agent/opa/ast/check.go | 1307 +---
 .../open-policy-agent/opa/ast/compare.go | 361 +-
 .../open-policy-agent/opa/ast/compile.go | 5871 +---------------
 .../opa/ast/compilehelper.go | 34 +-
 .../open-policy-agent/opa/ast/conflicts.go | 42 +-
 .../open-policy-agent/opa/ast/doc.go | 36 +-
 .../open-policy-agent/opa/ast/env.go | 518 +-
 .../open-policy-agent/opa/ast/errors.go | 99 +-
 .../open-policy-agent/opa/ast/index.go | 896 +--
 .../open-policy-agent/opa/ast/interning.go | 24 +
 .../open-policy-agent/opa/ast/json/doc.go | 8 +
 .../open-policy-agent/opa/ast/json/json.go | 31 +-
 .../open-policy-agent/opa/ast/map.go | 121 +-
 .../open-policy-agent/opa/ast/marshal.go | 11 -
 .../open-policy-agent/opa/ast/parser.go | 2712 +-------
 .../open-policy-agent/opa/ast/parser_ext.go | 594 +-
 .../open-policy-agent/opa/ast/policy.go | 1970 +-----
 .../open-policy-agent/opa/ast/pretty.go | 70 +-
 .../open-policy-agent/opa/ast/schema.go | 52 +-
 .../open-policy-agent/opa/ast/strings.go | 8 +-
 .../open-policy-agent/opa/ast/term.go | 3100 +---
 .../open-policy-agent/opa/ast/transform.go | 401 +-
 .../open-policy-agent/opa/ast/unify.go | 225 +-
 .../open-policy-agent/opa/ast/varset.go | 89 +-
 .../open-policy-agent/opa/ast/visit.go | 704 +-
 .../open-policy-agent/opa/bundle/bundle.go | 1688 +----
 .../open-policy-agent/opa/bundle/doc.go | 8 +
 .../open-policy-agent/opa/bundle/file.go | 482 +-
 .../open-policy-agent/opa/bundle/filefs.go | 127 +-
 .../open-policy-agent/opa/bundle/hash.go | 133 +-
 .../open-policy-agent/opa/bundle/keys.go | 126 +-
 .../open-policy-agent/opa/bundle/sign.go | 112 +-
 .../open-policy-agent/opa/bundle/store.go | 955 +--
 .../open-policy-agent/opa/bundle/verify.go | 207 +-
 .../open-policy-agent/opa/capabilities/doc.go | 8 +
 .../opa/capabilities/v1.0.0.json | 4835 +++++++++++++
 .../opa/capabilities/v1.0.1.json | 4835 +++++++++++++
 .../opa/capabilities/v1.1.0.json | 4835 +++++++++++++
 .../opa/capabilities/v1.2.0.json | 4849 +++++++++++++
 .../opa/capabilities/v1.3.0.json | 4849 +++++++++++++
 .../opa/capabilities/v1.4.0.json | 4849 +++++++++++++
 .../opa/capabilities/v1.4.1.json | 4849 +++++++++++++
 .../opa/capabilities/v1.4.2.json | 4849 +++++++++++++
 .../opa/internal/bundle/utils.go | 13 +-
 .../opa/internal/cidr/merge/merge.go | 2 +-
 .../opa/internal/compiler/utils.go | 62 +-
 .../opa/internal/compiler/wasm/wasm.go | 55 +-
 .../opa/internal/config/config.go | 10 +-
 .../internal/edittree/bitvector/bitvector.go | 2 +-
 .../opa/internal/edittree/edittree.go | 55 +-
 .../opa/internal/future/filter_imports.go | 10 +-
 .../opa/internal/future/parser_opts.go | 5 +-
 .../gojsonschema/schemaReferencePool.go | 8 +-
 .../opa/internal/gojsonschema/validation.go | 4 +-
 .../opa/internal/gqlparser/ast/definition.go | 10 +-
 .../opa/internal/gqlparser/ast/directive.go | 2 +-
 .../opa/internal/gqlparser/ast/document.go | 8 +-
 .../opa/internal/gqlparser/ast/dumper.go | 12 +-
 .../opa/internal/gqlparser/ast/fragment.go | 6 +-
 .../opa/internal/gqlparser/ast/operation.go | 4 +-
 .../opa/internal/gqlparser/ast/selection.go | 4 +-
 .../opa/internal/gqlparser/ast/type.go | 2 +-
 .../opa/internal/gqlparser/ast/value.go | 4 +-
 .../validator/rules/fields_on_correct_type.go | 2 +-
 .../rules/fragments_on_composite_types.go | 4 +-
 .../validator/rules/no_fragment_cycles.go | 2 +-
 .../validator/rules/values_of_correct_type.go | 2 -
 .../opa/internal/gqlparser/validator/vars.go | 7 +-
 .../opa/internal/json/patch/patch.go | 6 +-
 .../opa/internal/jwx/jwk/jwk.go | 2 +-
 .../opa/internal/jwx/jwk/key_ops.go | 5 +-
 .../opa/internal/jwx/jws/jws.go | 4 +-
 .../opa/internal/jwx/jws/sign/sign.go | 5 +-
 .../opa/internal/jwx/jws/verify/verify.go | 3 +-
 .../opa/internal/planner/planner.go | 65 +-
 .../opa/internal/planner/rules.go | 6 +-
.../opa/internal/planner/varstack.go | 4 +- .../internal/providers/aws/crypto/compare.go | 4 +- .../opa/internal/providers/aws/crypto/ecc.go | 3 +- .../opa/internal/providers/aws/ecr.go | 2 +- .../opa/internal/providers/aws/kms.go | 2 +- .../opa/internal/providers/aws/signing_v4.go | 22 +- .../opa/internal/providers/aws/signing_v4a.go | 12 +- .../opa/internal/providers/aws/util.go | 2 +- .../open-policy-agent/opa/internal/ref/ref.go | 4 +- .../opa/internal/rego/opa/options.go | 10 +- .../opa/internal/report/report.go | 10 +- .../opa/internal/runtime/init/init.go | 11 +- .../opa/internal/strings/strings.go | 2 +- .../opa/internal/strvals/parser.go | 6 +- .../opa/internal/version/version.go | 4 +- .../opa/internal/wasm/encoding/reader.go | 59 +- .../opa/internal/wasm/encoding/writer.go | 5 +- .../opa/internal/wasm/module/module.go | 8 +- .../open-policy-agent/opa/loader/doc.go | 8 + .../open-policy-agent/opa/loader/errors.go | 54 +- .../open-policy-agent/opa/loader/loader.go | 734 +- .../open-policy-agent/opa/rego/doc.go | 8 + .../open-policy-agent/opa/rego/errors.go | 17 +- .../open-policy-agent/opa/rego/plugins.go | 34 +- .../open-policy-agent/opa/rego/rego.go | 2513 +------ .../open-policy-agent/opa/rego/resultset.go | 80 +- .../open-policy-agent/opa/storage/doc.go | 4 + .../open-policy-agent/opa/storage/errors.go | 81 +- .../opa/storage/interface.go | 209 +- .../open-policy-agent/opa/storage/path.go | 132 +- .../open-policy-agent/opa/storage/storage.go | 97 +- .../opa/topdown/print/doc.go | 8 + .../opa/topdown/print/print.go | 13 +- .../opa/topdown/type_name.go | 36 - .../open-policy-agent/opa/types/decode.go | 181 +- .../open-policy-agent/opa/types/doc.go | 8 + .../open-policy-agent/opa/types/types.go | 1083 +-- .../open-policy-agent/opa/util/backoff.go | 53 - .../open-policy-agent/opa/util/hashmap.go | 157 - .../open-policy-agent/opa/util/maps.go | 10 - .../opa/v1/ast/annotations.go | 1008 +++ .../open-policy-agent/opa/v1/ast/builtins.go | 3407 ++++++++++ .../opa/v1/ast/capabilities.go | 266 + .../open-policy-agent/opa/v1/ast/check.go | 1327 ++++ .../open-policy-agent/opa/v1/ast/compare.go | 444 ++ .../open-policy-agent/opa/v1/ast/compile.go | 5979 +++++++++++++++++ .../opa/v1/ast/compilehelper.go | 62 + .../opa/{ => v1}/ast/compilemetrics.go | 0 .../open-policy-agent/opa/v1/ast/conflicts.go | 79 + .../open-policy-agent/opa/v1/ast/doc.go | 36 + .../open-policy-agent/opa/v1/ast/env.go | 528 ++ .../open-policy-agent/opa/v1/ast/errors.go | 124 + .../open-policy-agent/opa/v1/ast/index.go | 968 +++ .../{ => v1}/ast/internal/scanner/scanner.go | 54 +- .../{ => v1}/ast/internal/tokens/tokens.go | 12 +- .../open-policy-agent/opa/v1/ast/interning.go | 1120 +++ .../open-policy-agent/opa/v1/ast/json/json.go | 106 + .../opa/{ => v1}/ast/location/location.go | 12 +- .../open-policy-agent/opa/v1/ast/map.go | 108 + .../open-policy-agent/opa/v1/ast/parser.go | 2773 ++++++++ .../opa/v1/ast/parser_ext.go | 821 +++ .../open-policy-agent/opa/v1/ast/policy.go | 2013 ++++++ .../open-policy-agent/opa/v1/ast/pretty.go | 82 + .../opa/{ => v1}/ast/rego_v1.go | 4 +- .../open-policy-agent/opa/v1/ast/schema.go | 54 + .../open-policy-agent/opa/v1/ast/strings.go | 54 + .../open-policy-agent/opa/v1/ast/syncpools.go | 69 + .../open-policy-agent/opa/v1/ast/term.go | 3411 ++++++++++ .../open-policy-agent/opa/v1/ast/transform.go | 431 ++ .../open-policy-agent/opa/v1/ast/unify.go | 235 + .../open-policy-agent/opa/v1/ast/varset.go | 117 + .../opa/{ => v1}/ast/version_index.json | 14 + 
.../open-policy-agent/opa/v1/ast/visit.go | 783 +++ .../open-policy-agent/opa/v1/bundle/bundle.go | 1782 +++++ .../open-policy-agent/opa/v1/bundle/file.go | 517 ++ .../open-policy-agent/opa/v1/bundle/filefs.go | 143 + .../open-policy-agent/opa/v1/bundle/hash.go | 136 + .../open-policy-agent/opa/v1/bundle/keys.go | 144 + .../open-policy-agent/opa/v1/bundle/sign.go | 133 + .../open-policy-agent/opa/v1/bundle/store.go | 1166 ++++ .../open-policy-agent/opa/v1/bundle/verify.go | 232 + .../opa/v1/capabilities/capabilities.go | 18 + .../opa/{ => v1}/config/config.go | 11 +- .../opa/{ => v1}/format/format.go | 1112 ++- .../opa/{ => v1}/hooks/hooks.go | 2 +- .../open-policy-agent/opa/{ => v1}/ir/ir.go | 4 +- .../opa/{ => v1}/ir/marshal.go | 14 +- .../opa/{ => v1}/ir/pretty.go | 0 .../open-policy-agent/opa/{ => v1}/ir/walk.go | 0 .../opa/{ => v1}/keys/keys.go | 2 +- .../open-policy-agent/opa/v1/loader/errors.go | 62 + .../{ => v1}/loader/extension/extension.go | 0 .../opa/{ => v1}/loader/filter/filter.go | 0 .../open-policy-agent/opa/v1/loader/loader.go | 834 +++ .../opa/{ => v1}/logging/logging.go | 0 .../opa/{ => v1}/metrics/metrics.go | 8 +- .../opa/{ => v1}/plugins/plugins.go | 73 +- .../opa/{ => v1}/plugins/rest/auth.go | 33 +- .../opa/{ => v1}/plugins/rest/aws.go | 6 +- .../opa/{ => v1}/plugins/rest/azure.go | 0 .../opa/{ => v1}/plugins/rest/gcp.go | 0 .../opa/{ => v1}/plugins/rest/rest.go | 28 +- .../open-policy-agent/opa/v1/rego/errors.go | 24 + .../open-policy-agent/opa/v1/rego/plugins.go | 43 + .../open-policy-agent/opa/v1/rego/rego.go | 2944 ++++++++ .../opa/v1/rego/resultset.go | 90 + .../opa/{ => v1}/resolver/interface.go | 4 +- .../opa/{ => v1}/resolver/wasm/wasm.go | 11 +- .../{ => v1}/schemas/authorizationPolicy.json | 0 .../opa/{ => v1}/schemas/schemas.go | 0 .../open-policy-agent/opa/v1/storage/doc.go | 6 + .../opa/v1/storage/errors.go | 121 + .../opa/{ => v1}/storage/inmem/ast.go | 19 +- .../opa/{ => v1}/storage/inmem/inmem.go | 12 +- .../opa/{ => v1}/storage/inmem/opts.go | 0 .../opa/{ => v1}/storage/inmem/txn.go | 8 +- .../opa/v1/storage/interface.go | 247 + .../storage/internal/errors/errors.go | 8 +- .../opa/{ => v1}/storage/internal/ptr/ptr.go | 15 +- .../open-policy-agent/opa/v1/storage/path.go | 162 + .../opa/v1/storage/storage.go | 136 + .../opa/{ => v1}/topdown/aggregates.go | 67 +- .../opa/{ => v1}/topdown/arithmetic.go | 22 +- .../opa/{ => v1}/topdown/array.go | 19 +- .../opa/{ => v1}/topdown/binary.go | 4 +- .../opa/{ => v1}/topdown/bindings.go | 23 +- .../opa/{ => v1}/topdown/bits.go | 4 +- .../opa/{ => v1}/topdown/builtins.go | 13 +- .../opa/{ => v1}/topdown/builtins/builtins.go | 23 +- .../opa/{ => v1}/topdown/cache.go | 66 +- .../opa/{ => v1}/topdown/cache/cache.go | 242 +- .../opa/{ => v1}/topdown/cancel.go | 0 .../opa/{ => v1}/topdown/casts.go | 40 +- .../opa/{ => v1}/topdown/cidr.go | 26 +- .../opa/{ => v1}/topdown/comparison.go | 4 +- .../copypropagation/copypropagation.go | 8 +- .../topdown/copypropagation/unionfind.go | 22 +- .../opa/{ => v1}/topdown/crypto.go | 110 +- .../opa/{ => v1}/topdown/doc.go | 0 .../opa/{ => v1}/topdown/encoding.go | 18 +- .../opa/{ => v1}/topdown/errors.go | 2 +- .../opa/{ => v1}/topdown/eval.go | 835 ++- .../opa/{ => v1}/topdown/glob.go | 28 +- .../opa/{ => v1}/topdown/graphql.go | 404 +- .../opa/{ => v1}/topdown/http.go | 195 +- .../opa/{ => v1}/topdown/http_fixup.go | 0 .../opa/{ => v1}/topdown/http_fixup_darwin.go | 0 .../opa/{ => v1}/topdown/input.go | 6 +- .../opa/{ => v1}/topdown/instrumentation.go | 2 +- .../opa/{ => 
v1}/topdown/json.go | 19 +- .../opa/{ => v1}/topdown/jsonschema.go | 6 +- .../opa/{ => v1}/topdown/net.go | 4 +- .../opa/{ => v1}/topdown/numbers.go | 72 +- .../opa/{ => v1}/topdown/object.go | 46 +- .../opa/{ => v1}/topdown/parse.go | 5 +- .../opa/{ => v1}/topdown/parse_bytes.go | 28 +- .../opa/{ => v1}/topdown/parse_units.go | 6 +- .../opa/{ => v1}/topdown/print.go | 8 +- .../opa/v1/topdown/print/print.go | 21 + .../opa/{ => v1}/topdown/providers.go | 7 +- .../opa/{ => v1}/topdown/query.go | 72 +- .../opa/{ => v1}/topdown/reachable.go | 4 +- .../opa/{ => v1}/topdown/regex.go | 20 +- .../opa/{ => v1}/topdown/regex_template.go | 2 +- .../opa/{ => v1}/topdown/resolver.go | 17 +- .../opa/{ => v1}/topdown/runtime.go | 15 +- .../opa/{ => v1}/topdown/save.go | 15 +- .../opa/{ => v1}/topdown/semver.go | 10 +- .../opa/{ => v1}/topdown/sets.go | 6 +- .../opa/{ => v1}/topdown/strings.go | 262 +- .../opa/{ => v1}/topdown/subset.go | 59 +- .../opa/{ => v1}/topdown/template.go | 4 +- .../open-policy-agent/opa/v1/topdown/test.go | 30 + .../opa/{ => v1}/topdown/time.go | 18 +- .../opa/{ => v1}/topdown/tokens.go | 398 +- .../opa/{ => v1}/topdown/trace.go | 18 +- .../opa/{ => v1}/topdown/type.go | 30 +- .../opa/v1/topdown/type_name.go | 46 + .../opa/{ => v1}/topdown/uuid.go | 4 +- .../opa/{ => v1}/topdown/walk.go | 70 +- .../opa/{ => v1}/tracing/tracing.go | 0 .../open-policy-agent/opa/v1/types/decode.go | 191 + .../open-policy-agent/opa/v1/types/types.go | 1222 ++++ .../open-policy-agent/opa/v1/util/backoff.go | 42 + .../open-policy-agent/opa/v1/util/channel.go | 32 + .../opa/{ => v1}/util/close.go | 0 .../opa/{ => v1}/util/compare.go | 17 +- .../opa/{ => v1}/util/decoding/context.go | 0 .../opa/{ => v1}/util/doc.go | 0 .../opa/{ => v1}/util/enumflag.go | 0 .../opa/{ => v1}/util/graph.go | 0 .../open-policy-agent/opa/v1/util/hashmap.go | 271 + .../opa/{ => v1}/util/json.go | 2 +- .../open-policy-agent/opa/v1/util/maps.go | 34 + .../opa/v1/util/performance.go | 64 + .../opa/{ => v1}/util/queue.go | 0 .../opa/{ => v1}/util/read_gzip_body.go | 6 +- .../opa/{ => v1}/util/time.go | 0 .../opa/{ => v1}/util/wait.go | 4 +- .../opa/{ => v1}/version/version.go | 4 +- .../opa/{ => v1}/version/wasm.go | 0 .../client_golang/prometheus/desc.go | 15 +- .../prometheus/go_collector_latest.go | 2 +- .../client_golang/prometheus/histogram.go | 249 +- .../prometheus/internal/difflib.go | 19 +- .../prometheus/internal/go_runtime_metrics.go | 3 +- .../client_golang/prometheus/metric.go | 24 +- .../prometheus/process_collector.go | 31 +- .../prometheus/process_collector_darwin.go | 130 + .../process_collector_mem_cgo_darwin.c | 84 + .../process_collector_mem_cgo_darwin.go | 51 + .../process_collector_mem_nocgo_darwin.go | 39 + ....go => process_collector_not_supported.go} | 17 +- ....go => process_collector_procfsenabled.go} | 20 +- .../prometheus/process_collector_wasip1.go | 26 - .../prometheus/process_collector_windows.go | 21 +- .../client_golang/prometheus/promhttp/http.go | 23 +- .../client_golang/prometheus/summary.go | 7 +- .../prometheus/common/expfmt/decode.go | 14 +- .../prometheus/common/expfmt/encode.go | 28 +- .../prometheus/common/expfmt/expfmt.go | 78 +- .../common/expfmt/openmetrics_create.go | 10 +- .../prometheus/common/expfmt/text_create.go | 4 +- .../prometheus/common/expfmt/text_parse.go | 164 +- .../prometheus/common/model/alert.go | 7 +- .../prometheus/common/model/labels.go | 27 +- .../common/model/labelset_string.go | 2 - .../common/model/labelset_string_go120.go | 39 - 
.../prometheus/common/model/metric.go | 78 +- .../prometheus/common/model/silence.go | 17 +- .../prometheus/common/model/value_float.go | 3 +- .../common/model/value_histogram.go | 7 +- .../fsnotify => spf13/afero}/.editorconfig | 12 +- vendor/github.com/spf13/afero/.golangci.yaml | 18 + vendor/github.com/spf13/afero/README.md | 2 +- vendor/github.com/spf13/afero/iofs.go | 1 - vendor/github.com/spf13/afero/memmap.go | 2 - vendor/github.com/spf13/cobra/README.md | 5 +- vendor/github.com/spf13/cobra/active_help.go | 2 +- .../spf13/cobra/bash_completionsV2.go | 152 +- vendor/github.com/spf13/cobra/cobra.go | 16 +- vendor/github.com/spf13/cobra/command.go | 331 +- vendor/github.com/spf13/cobra/completions.go | 126 +- .../spf13/cobra/powershell_completions.go | 35 +- vendor/github.com/spf13/pflag/.editorconfig | 12 + vendor/github.com/spf13/pflag/.golangci.yaml | 4 + vendor/github.com/spf13/pflag/flag.go | 29 +- vendor/github.com/spf13/pflag/ip.go | 3 + vendor/github.com/spf13/pflag/ipnet_slice.go | 147 + vendor/github.com/spf13/pflag/string_array.go | 4 - .../auto/sdk/CONTRIBUTING.md | 27 + .../auto/sdk}/LICENSE | 16 +- .../auto/sdk/VERSIONING.md | 15 + vendor/go.opentelemetry.io/auto/sdk/doc.go | 14 + .../auto/sdk/internal/telemetry/attr.go | 58 + .../auto/sdk/internal/telemetry/doc.go | 8 + .../auto/sdk/internal/telemetry/id.go | 103 + .../auto/sdk/internal/telemetry/number.go | 67 + .../auto/sdk/internal/telemetry/resource.go | 66 + .../auto/sdk/internal/telemetry/scope.go | 67 + .../auto/sdk/internal/telemetry/span.go | 456 ++ .../auto/sdk/internal/telemetry/status.go | 40 + .../auto/sdk/internal/telemetry/traces.go | 189 + .../auto/sdk/internal/telemetry/value.go | 452 ++ vendor/go.opentelemetry.io/auto/sdk/limit.go | 94 + vendor/go.opentelemetry.io/auto/sdk/span.go | 432 ++ vendor/go.opentelemetry.io/auto/sdk/tracer.go | 124 + .../auto/sdk/tracer_provider.go | 33 + .../net/http/otelhttp/client.go | 6 +- .../net/http/otelhttp/handler.go | 45 +- .../otelhttp/internal/request/body_wrapper.go | 3 + .../net/http/otelhttp/internal/request/gen.go | 10 + .../internal/request/resp_writer_wrapper.go | 3 + .../net/http/otelhttp/internal/semconv/env.go | 158 +- .../net/http/otelhttp/internal/semconv/gen.go | 14 + .../otelhttp/internal/semconv/httpconv.go | 225 +- .../http/otelhttp/internal/semconv/util.go | 28 +- .../http/otelhttp/internal/semconv/v1.20.0.go | 55 +- .../otelhttp/internal/semconvutil/netconv.go | 11 +- .../net/http/otelhttp/transport.go | 2 +- .../net/http/otelhttp/version.go | 2 +- .../contrib/zpages/internal/gen.go | 3 + .../contrib/zpages/version.go | 2 +- vendor/go.opentelemetry.io/otel/.gitignore | 1 + vendor/go.opentelemetry.io/otel/.golangci.yml | 121 +- vendor/go.opentelemetry.io/otel/CHANGELOG.md | 87 +- .../go.opentelemetry.io/otel/CONTRIBUTING.md | 12 + vendor/go.opentelemetry.io/otel/Makefile | 53 +- vendor/go.opentelemetry.io/otel/README.md | 15 +- vendor/go.opentelemetry.io/otel/RELEASING.md | 17 +- vendor/go.opentelemetry.io/otel/VERSIONING.md | 2 +- .../otel/baggage/baggage.go | 4 +- .../go.opentelemetry.io/otel/codes/codes.go | 3 +- .../otel/dependencies.Dockerfile | 3 + .../otlp/otlptrace/otlptracegrpc/client.go | 2 +- .../internal/otlpconfig/options.go | 2 +- .../otel/exporters/otlp/otlptrace/version.go | 2 +- .../otel/internal/attribute/attribute.go | 44 +- .../otel/internal/global/trace.go | 25 + vendor/go.opentelemetry.io/otel/renovate.json | 8 +- .../go.opentelemetry.io/otel/requirements.txt | 2 +- .../otel/sdk/resource/os_release_darwin.go | 3 +- 
.../otel/sdk/resource/resource.go | 25 +- .../otel/sdk/trace/batch_span_processor.go | 3 +- .../otel/sdk/trace/sampler_env.go | 5 +- .../otel/sdk/trace/sampling.go | 8 +- .../otel/sdk/trace/simple_span_processor.go | 2 +- .../otel/sdk/trace/span.go | 101 +- .../go.opentelemetry.io/otel/sdk/version.go | 2 +- vendor/go.opentelemetry.io/otel/trace/auto.go | 661 ++ .../go.opentelemetry.io/otel/trace/config.go | 2 +- .../otel/trace/internal/telemetry/attr.go | 58 + .../otel/trace/internal/telemetry/doc.go | 8 + .../otel/trace/internal/telemetry/id.go | 103 + .../otel/trace/internal/telemetry/number.go | 67 + .../otel/trace/internal/telemetry/resource.go | 66 + .../otel/trace/internal/telemetry/scope.go | 67 + .../otel/trace/internal/telemetry/span.go | 460 ++ .../otel/trace/internal/telemetry/status.go | 40 + .../otel/trace/internal/telemetry/traces.go | 189 + .../otel/trace/internal/telemetry/value.go | 453 ++ vendor/go.opentelemetry.io/otel/trace/noop.go | 20 +- vendor/go.opentelemetry.io/otel/version.go | 2 +- vendor/go.opentelemetry.io/otel/versions.yaml | 9 +- .../proto/otlp/trace/v1/trace.pb.go | 6 +- vendor/golang.org/x/oauth2/oauth2.go | 2 +- .../googleapis/api/annotations/client.pb.go | 745 +- .../googleapis/api/annotations/http.pb.go | 6 +- .../googleapis/api/annotations/resource.pb.go | 5 +- .../googleapis/api/annotations/routing.pb.go | 2 +- .../googleapis/api/httpbody/httpbody.pb.go | 6 +- .../rpc/errdetails/error_details.pb.go | 128 +- .../grpc/balancer/balancer.go | 115 +- .../endpointsharding/endpointsharding.go | 358 + .../balancer/pickfirst/internal/internal.go | 17 +- .../grpc/balancer/pickfirst/pickfirst.go | 2 +- .../pickfirst/pickfirstleaf/pickfirstleaf.go | 493 +- .../grpc/balancer/roundrobin/roundrobin.go | 70 +- .../grpc/balancer/subconn.go | 134 + .../grpc/balancer_wrapper.go | 151 +- .../grpc_binarylog_v1/binarylog.pb.go | 296 +- vendor/google.golang.org/grpc/clientconn.go | 58 +- vendor/google.golang.org/grpc/codec.go | 2 +- .../google.golang.org/grpc/credentials/tls.go | 6 +- vendor/google.golang.org/grpc/dialoptions.go | 55 +- .../grpc/experimental/stats/metricregistry.go | 27 +- .../grpc/experimental/stats/metrics.go | 78 +- .../grpc/grpclog/internal/loggerv2.go | 107 +- .../grpc/health/grpc_health_v1/health.pb.go | 81 +- .../google.golang.org/grpc/health/producer.go | 106 + .../grpc/internal/backoff/backoff.go | 2 +- .../balancer/gracefulswitch/gracefulswitch.go | 10 +- .../grpc/internal/envconfig/envconfig.go | 2 +- .../grpc/internal/envconfig/xds.go | 10 + .../grpc/internal/grpcsync/oncefunc.go | 32 - .../grpc/internal/internal.go | 49 +- .../proxyattributes/proxyattributes.go | 54 + .../delegatingresolver/delegatingresolver.go | 353 + .../internal/resolver/dns/dns_resolver.go | 45 +- .../grpc/internal/transport/client_stream.go | 144 + .../grpc/internal/transport/flowcontrol.go | 9 +- .../grpc/internal/transport/handler_server.go | 38 +- .../grpc/internal/transport/http2_client.go | 100 +- .../grpc/internal/transport/http2_server.go | 73 +- .../grpc/internal/transport/proxy.go | 62 +- .../grpc/internal/transport/server_stream.go | 178 + .../grpc/internal/transport/transport.go | 323 +- .../grpc/mem/buffer_slice.go | 59 +- .../google.golang.org/grpc/picker_wrapper.go | 2 +- vendor/google.golang.org/grpc/preloader.go | 4 +- .../grpc_reflection_v1/reflection.pb.go | 328 +- .../grpc_reflection_v1alpha/reflection.pb.go | 324 +- vendor/google.golang.org/grpc/resolver/map.go | 103 +- .../grpc/resolver/resolver.go | 25 +- .../grpc/resolver_wrapper.go | 36 +- 
vendor/google.golang.org/grpc/rpc_util.go | 119 +- vendor/google.golang.org/grpc/server.go | 118 +- .../google.golang.org/grpc/service_config.go | 22 +- .../google.golang.org/grpc/stats/metrics.go | 81 + vendor/google.golang.org/grpc/stats/stats.go | 74 +- vendor/google.golang.org/grpc/stream.go | 348 +- vendor/google.golang.org/grpc/version.go | 2 +- .../editiondefaults/editions_defaults.binpb | Bin 138 -> 146 bytes .../protobuf/internal/filedesc/editions.go | 3 + .../protobuf/internal/genid/descriptor_gen.go | 16 + ...ings_unsafe_go121.go => strings_unsafe.go} | 2 - .../internal/strs/strings_unsafe_go120.go | 94 - .../protobuf/internal/version/version.go | 2 +- .../google.golang.org/protobuf/proto/merge.go | 6 + .../reflect/protoreflect/source_gen.go | 2 + ...{value_unsafe_go121.go => value_unsafe.go} | 2 - .../protoreflect/value_unsafe_go120.go | 98 - .../types/descriptorpb/descriptor.pb.go | 1439 ++-- .../types/gofeaturespb/go_features.pb.go | 80 +- .../protobuf/types/known/anypb/any.pb.go | 24 +- .../types/known/durationpb/duration.pb.go | 25 +- .../protobuf/types/known/emptypb/empty.pb.go | 20 +- .../types/known/fieldmaskpb/field_mask.pb.go | 23 +- .../types/known/structpb/struct.pb.go | 74 +- .../types/known/timestamppb/timestamp.pb.go | 25 +- .../types/known/wrapperspb/wrappers.pb.go | 103 +- vendor/modules.txt | 171 +- 532 files changed, 100540 insertions(+), 41743 deletions(-) delete mode 100644 vendor/github.com/OneOfOne/xxhash/.gitignore delete mode 100644 vendor/github.com/OneOfOne/xxhash/.travis.yml delete mode 100644 vendor/github.com/OneOfOne/xxhash/README.md delete mode 100644 vendor/github.com/OneOfOne/xxhash/xxhash.go delete mode 100644 vendor/github.com/OneOfOne/xxhash/xxhash_go17.go delete mode 100644 vendor/github.com/OneOfOne/xxhash/xxhash_safe.go delete mode 100644 vendor/github.com/OneOfOne/xxhash/xxhash_unsafe.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/.gitattributes create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/darwin.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/freebsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/internal.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/unix.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/unix2.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/windows.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/mkdoc.zsh delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.swagger.json create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations_protoopaque.pb.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/buf.gen.yaml delete mode 100644 
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.swagger.json create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2_protoopaque.pb.go create mode 100644 vendor/github.com/open-policy-agent/opa/ast/interning.go create mode 100644 vendor/github.com/open-policy-agent/opa/ast/json/doc.go delete mode 100644 vendor/github.com/open-policy-agent/opa/ast/marshal.go create mode 100644 vendor/github.com/open-policy-agent/opa/bundle/doc.go create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/doc.go create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.0.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.0.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.1.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.2.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.3.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.4.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.4.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.4.2.json create mode 100644 vendor/github.com/open-policy-agent/opa/loader/doc.go create mode 100644 vendor/github.com/open-policy-agent/opa/rego/doc.go create mode 100644 vendor/github.com/open-policy-agent/opa/topdown/print/doc.go delete mode 100644 vendor/github.com/open-policy-agent/opa/topdown/type_name.go create mode 100644 vendor/github.com/open-policy-agent/opa/types/doc.go delete mode 100644 vendor/github.com/open-policy-agent/opa/util/backoff.go delete mode 100644 vendor/github.com/open-policy-agent/opa/util/hashmap.go delete mode 100644 vendor/github.com/open-policy-agent/opa/util/maps.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/annotations.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/builtins.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/capabilities.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/check.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/compare.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/compile.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/compilehelper.go rename vendor/github.com/open-policy-agent/opa/{ => v1}/ast/compilemetrics.go (100%) create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/conflicts.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/doc.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/env.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/errors.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/index.go rename vendor/github.com/open-policy-agent/opa/{ => v1}/ast/internal/scanner/scanner.go (91%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/ast/internal/tokens/tokens.go (93%) create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/interning.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/json/json.go rename vendor/github.com/open-policy-agent/opa/{ => v1}/ast/location/location.go (91%) create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/map.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/parser.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/parser_ext.go create 
mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/policy.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/pretty.go rename vendor/github.com/open-policy-agent/opa/{ => v1}/ast/rego_v1.go (96%) create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/schema.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/strings.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/syncpools.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/term.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/transform.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/unify.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/varset.go rename vendor/github.com/open-policy-agent/opa/{ => v1}/ast/version_index.json (99%) create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/visit.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/bundle/bundle.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/bundle/file.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/bundle/filefs.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/bundle/hash.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/bundle/keys.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/bundle/sign.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/bundle/store.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/bundle/verify.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/capabilities/capabilities.go rename vendor/github.com/open-policy-agent/opa/{ => v1}/config/config.go (96%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/format/format.go (56%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/hooks/hooks.go (98%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/ir/ir.go (99%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/ir/marshal.go (93%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/ir/pretty.go (100%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/ir/walk.go (100%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/keys/keys.go (98%) create mode 100644 vendor/github.com/open-policy-agent/opa/v1/loader/errors.go rename vendor/github.com/open-policy-agent/opa/{ => v1}/loader/extension/extension.go (100%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/loader/filter/filter.go (100%) create mode 100644 vendor/github.com/open-policy-agent/opa/v1/loader/loader.go rename vendor/github.com/open-policy-agent/opa/{ => v1}/logging/logging.go (100%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/metrics/metrics.go (97%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/plugins/plugins.go (94%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/plugins/rest/auth.go (96%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/plugins/rest/aws.go (99%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/plugins/rest/azure.go (100%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/plugins/rest/gcp.go (100%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/plugins/rest/rest.go (94%) create mode 100644 vendor/github.com/open-policy-agent/opa/v1/rego/errors.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/rego/plugins.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/rego/rego.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/rego/resultset.go rename 
vendor/github.com/open-policy-agent/opa/{ => v1}/resolver/interface.go (86%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/resolver/wasm/wasm.go (94%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/schemas/authorizationPolicy.json (100%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/schemas/schemas.go (100%) create mode 100644 vendor/github.com/open-policy-agent/opa/v1/storage/doc.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/storage/errors.go rename vendor/github.com/open-policy-agent/opa/{ => v1}/storage/inmem/ast.go (94%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/storage/inmem/inmem.go (97%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/storage/inmem/opts.go (100%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/storage/inmem/txn.go (98%) create mode 100644 vendor/github.com/open-policy-agent/opa/v1/storage/interface.go rename vendor/github.com/open-policy-agent/opa/{ => v1}/storage/internal/errors/errors.go (85%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/storage/internal/ptr/ptr.go (83%) create mode 100644 vendor/github.com/open-policy-agent/opa/v1/storage/path.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/storage/storage.go rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/aggregates.go (80%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/arithmetic.go (92%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/array.go (86%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/binary.go (90%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/bindings.go (95%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/bits.go (96%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/builtins.go (95%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/builtins/builtins.go (94%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/cache.go (84%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/cache/cache.go (63%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/cancel.go (100%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/casts.go (76%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/cidr.go (94%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/comparison.go (90%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/copypropagation/copypropagation.go (97%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/copypropagation/unionfind.go (83%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/crypto.go (89%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/doc.go (100%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/encoding.go (95%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/errors.go (98%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/eval.go (85%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/glob.go (85%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/graphql.go (54%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/http.go (90%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/http_fixup.go (100%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/http_fixup_darwin.go (100%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/input.go (95%) rename vendor/github.com/open-policy-agent/opa/{ => 
v1}/topdown/instrumentation.go (97%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/json.go (95%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/jsonschema.go (96%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/net.go (93%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/numbers.go (63%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/object.go (91%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/parse.go (91%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/parse_bytes.go (74%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/parse_units.go (95%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/print.go (88%) create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/print/print.go rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/providers.go (97%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/query.go (90%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/reachable.go (97%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/regex.go (93%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/regex_template.go (99%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/resolver.go (86%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/runtime.go (86%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/save.go (96%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/semver.go (85%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/sets.go (92%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/strings.go (71%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/subset.go (82%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/template.go (90%) create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/test.go rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/time.go (92%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/tokens.go (74%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/trace.go (98%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/type.go (71%) create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/type_name.go rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/uuid.go (92%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/topdown/walk.go (61%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/tracing/tracing.go (100%) create mode 100644 vendor/github.com/open-policy-agent/opa/v1/types/decode.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/types/types.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/util/backoff.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/util/channel.go rename vendor/github.com/open-policy-agent/opa/{ => v1}/util/close.go (100%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/util/compare.go (92%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/util/decoding/context.go (100%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/util/doc.go (100%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/util/enumflag.go (100%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/util/graph.go (100%) create mode 100644 vendor/github.com/open-policy-agent/opa/v1/util/hashmap.go rename vendor/github.com/open-policy-agent/opa/{ => v1}/util/json.go 
(98%) create mode 100644 vendor/github.com/open-policy-agent/opa/v1/util/maps.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/util/performance.go rename vendor/github.com/open-policy-agent/opa/{ => v1}/util/queue.go (100%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/util/read_gzip_body.go (95%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/util/time.go (100%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/util/wait.go (94%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/version/version.go (95%) rename vendor/github.com/open-policy-agent/opa/{ => v1}/version/wasm.go (100%) create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go rename vendor/github.com/prometheus/client_golang/prometheus/{process_collector_js.go => process_collector_not_supported.go} (56%) rename vendor/github.com/prometheus/client_golang/prometheus/{process_collector_other.go => process_collector_procfsenabled.go} (77%) delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go delete mode 100644 vendor/github.com/prometheus/common/model/labelset_string_go120.go rename vendor/github.com/{fsnotify/fsnotify => spf13/afero}/.editorconfig (69%) create mode 100644 vendor/github.com/spf13/afero/.golangci.yaml create mode 100644 vendor/github.com/spf13/pflag/.editorconfig create mode 100644 vendor/github.com/spf13/pflag/.golangci.yaml create mode 100644 vendor/github.com/spf13/pflag/ipnet_slice.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md rename vendor/{github.com/OneOfOne/xxhash => go.opentelemetry.io/auto/sdk}/LICENSE (94%) create mode 100644 vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md create mode 100644 vendor/go.opentelemetry.io/auto/sdk/doc.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/limit.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/span.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/tracer.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/gen.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go create mode 100644 vendor/go.opentelemetry.io/otel/dependencies.Dockerfile create mode 
100644 vendor/go.opentelemetry.io/otel/trace/auto.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go create mode 100644 vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go create mode 100644 vendor/google.golang.org/grpc/balancer/subconn.go create mode 100644 vendor/google.golang.org/grpc/health/producer.go delete mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go create mode 100644 vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/client_stream.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/server_stream.go create mode 100644 vendor/google.golang.org/grpc/stats/metrics.go rename vendor/google.golang.org/protobuf/internal/strs/{strings_unsafe_go121.go => strings_unsafe.go} (99%) delete mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go rename vendor/google.golang.org/protobuf/reflect/protoreflect/{value_unsafe_go121.go => value_unsafe.go} (99%) delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go diff --git a/go.mod b/go.mod index 8336ea1604e..b93e4b8b998 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,7 @@ require ( github.com/google/uuid v1.6.0 github.com/gookit/config/v2 v2.2.5 github.com/gorilla/mux v1.8.1 - github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 github.com/invopop/validation v0.8.0 github.com/jellydator/ttlcache/v2 v2.11.1 github.com/jellydator/ttlcache/v3 v3.3.0 @@ -64,13 +64,13 @@ require ( github.com/onsi/ginkgo v1.16.5 github.com/onsi/ginkgo/v2 v2.23.4 github.com/onsi/gomega v1.36.3 - github.com/open-policy-agent/opa v0.70.0 + github.com/open-policy-agent/opa v1.4.2 github.com/orcaman/concurrent-map v1.0.0 github.com/owncloud/libre-graph-api-go v1.0.5-0.20250217093259-fa3804be6c27 github.com/owncloud/reva/v2 v2.0.0-20250502110034-f92362f1143d github.com/pkg/errors v0.9.1 github.com/pkg/xattr v0.4.10 - github.com/prometheus/client_golang v1.20.5 + github.com/prometheus/client_golang v1.21.1 github.com/r3labs/sse/v2 v2.10.0 github.com/riandyrn/otelchi v0.11.0 github.com/rogpeppe/go-internal v1.13.1 @@ -78,8 +78,8 @@ require ( github.com/rs/zerolog v1.33.0 github.com/shamaton/msgpack/v2 v2.2.2 github.com/sirupsen/logrus v1.9.3 - github.com/spf13/afero v1.11.0 - github.com/spf13/cobra v1.8.1 + github.com/spf13/afero v1.12.0 + github.com/spf13/cobra v1.9.1 github.com/stretchr/testify v1.10.0 github.com/test-go/testify v1.1.4 github.com/thejerf/suture/v4 v4.0.6 @@ -91,24 +91,24 @@ require ( 
go-micro.dev/v4 v4.11.0 go.etcd.io/bbolt v1.3.11 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.57.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 - go.opentelemetry.io/contrib/zpages v0.57.0 - go.opentelemetry.io/otel v1.32.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 + go.opentelemetry.io/contrib/zpages v0.60.0 + go.opentelemetry.io/otel v1.35.0 go.opentelemetry.io/otel/exporters/jaeger v1.17.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 - go.opentelemetry.io/otel/sdk v1.32.0 - go.opentelemetry.io/otel/trace v1.32.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 + go.opentelemetry.io/otel/sdk v1.35.0 + go.opentelemetry.io/otel/trace v1.35.0 golang.org/x/crypto v0.37.0 golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c golang.org/x/image v0.22.0 golang.org/x/net v0.38.0 - golang.org/x/oauth2 v0.24.0 + golang.org/x/oauth2 v0.26.0 golang.org/x/sync v0.13.0 golang.org/x/term v0.31.0 golang.org/x/text v0.24.0 - google.golang.org/genproto/googleapis/api v0.0.0-20241118233622-e639e219e697 - google.golang.org/grpc v1.68.0 - google.golang.org/protobuf v1.36.5 + google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a + google.golang.org/grpc v1.71.1 + google.golang.org/protobuf v1.36.6 gopkg.in/yaml.v2 v2.4.0 gotest.tools/v3 v3.5.1 stash.kopano.io/kgol/rndm v1.1.2 @@ -122,10 +122,9 @@ require ( github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/sprig v2.22.0+incompatible // indirect github.com/Microsoft/go-winio v0.6.2 // indirect - github.com/OneOfOne/xxhash v1.2.8 // indirect github.com/ProtonMail/go-crypto v1.1.3 // indirect github.com/RoaringBitmap/roaring v1.9.3 // indirect - github.com/agnivade/levenshtein v1.2.0 // indirect + github.com/agnivade/levenshtein v1.2.1 // indirect github.com/ajg/form v1.5.1 // indirect github.com/alexedwards/argon2id v1.0.0 // indirect github.com/amoghe/go-crypt v0.0.0-20220222110647-20eada5f5964 // indirect @@ -164,7 +163,7 @@ require ( github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cornelk/hashmap v1.0.8 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect github.com/crewjam/httperr v0.2.0 // indirect github.com/crewjam/saml v0.4.14 // indirect github.com/cyphar/filepath-securejoin v0.2.5 // indirect @@ -172,6 +171,7 @@ require ( github.com/deckarep/golang-set v1.8.0 // indirect github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect github.com/dgraph-io/ristretto v0.2.0 // indirect + github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/dlclark/regexp2 v1.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect @@ -183,7 +183,7 @@ require ( github.com/fatih/color v1.14.1 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/frankban/quicktest v1.14.6 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/gdexlab/go-render v1.0.1 // indirect github.com/go-acme/lego/v4 v4.4.0 // indirect github.com/go-asn1-ber/asn1-ber v1.5.5 // indirect @@ -222,7 +222,6 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/snappy v0.0.4 // indirect github.com/gomodule/redigo v1.9.2 // indirect - 
github.com/google/flatbuffers v2.0.8+incompatible // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/go-tpm v0.9.3 // indirect github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect @@ -285,7 +284,7 @@ require ( github.com/pquerna/cachecontrol v0.2.0 // indirect github.com/prometheus/alertmanager v0.27.0 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/prometheus/statsd_exporter v0.22.8 // indirect github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect @@ -302,10 +301,10 @@ require ( github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 // indirect github.com/skeema/knownhosts v1.3.0 // indirect github.com/spacewander/go-suffix-tree v0.0.0-20191010040751-0865e368c784 // indirect - github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/pflag v1.0.6 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/studio-b12/gowebdav v0.9.0 // indirect - github.com/tchap/go-patricia/v2 v2.3.1 // indirect + github.com/tchap/go-patricia/v2 v2.3.2 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.1 // indirect github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208 // indirect @@ -321,9 +320,10 @@ require ( go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect go.etcd.io/etcd/client/v3 v3.5.16 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect - go.opentelemetry.io/otel/metric v1.32.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect + go.opentelemetry.io/otel/metric v1.35.0 // indirect + go.opentelemetry.io/proto/otlp v1.5.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/multierr v1.9.0 // indirect @@ -333,8 +333,8 @@ require ( golang.org/x/time v0.11.0 // indirect golang.org/x/tools v0.31.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697 // indirect + google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect diff --git a/go.sum b/go.sum index b94a2915351..987e90fc927 100644 --- a/go.sum +++ b/go.sum @@ -84,8 +84,6 @@ github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA github.com/Nerzal/gocloak/v13 v13.9.0 h1:YWsJsdM5b0yhM2Ba3MLydiOlujkBry4TtdzfIzSVZhw= github.com/Nerzal/gocloak/v13 v13.9.0/go.mod h1:YYuDcXZ7K2zKECyVP7pPqjKxx2AzYSpKDj8d6GuyM10= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8= -github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= github.com/OpenDNS/vegadns2client v0.0.0-20180418235048-a3fa4a771d87/go.mod h1:iGLljf5n9GjT6kc0HBvyI1nOKnGQbNB66VzSNbK5iks= github.com/ProtonMail/go-crypto v1.1.3 
h1:nRBOetoydLeUb4nHajyO2bKqMLfWQ/ZPwkXqXxPxCFk= github.com/ProtonMail/go-crypto v1.1.3/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= @@ -93,8 +91,8 @@ github.com/RoaringBitmap/roaring v1.9.3 h1:t4EbC5qQwnisr5PrP9nt0IRhRTb9gMUgQF4t4 github.com/RoaringBitmap/roaring v1.9.3/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/agnivade/levenshtein v1.2.0 h1:U9L4IOT0Y3i0TIlUIDJ7rVUziKi/zPbrJGaFrtYH3SY= -github.com/agnivade/levenshtein v1.2.0/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU= +github.com/agnivade/levenshtein v1.2.1 h1:EHBY3UOn1gwdy/VbFwgo4cxecRznFk7fKWN1KOX7eoM= +github.com/agnivade/levenshtein v1.2.1/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU= github.com/ajg/form v1.5.1 h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU= github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/akamai/AkamaiOPEN-edgegrid-golang v1.1.0/go.mod h1:kX6YddBkXqqywAe8c9LyvgTCyFuZCTMF4cRPQhc3Fy8= @@ -206,7 +204,6 @@ github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/ceph/go-ceph v0.30.0 h1:p/+rNnn9dUByrDhXfBFilVriRZKJghMJcts8N2wQ+ws= github.com/ceph/go-ceph v0.30.0/go.mod h1:OJFju/Xmtb7ihHo/aXOayw6RhVOUGNke5EwTipwaf6A= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -241,9 +238,8 @@ github.com/cornelk/hashmap v1.0.8/go.mod h1:RfZb7JO3RviW/rT6emczVuC/oxpdz4UsSB2L github.com/cpu/goacmedns v0.1.1/go.mod h1:MuaouqEhPAHxsbqjgnck5zeghuwBP1dLnPoobeGqugQ= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc= -github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/crewjam/httperr v0.2.0 h1:b2BfXR8U3AlIHwNeFFvZ+BV1LFvKLlzMjzaTnZMybNo= github.com/crewjam/httperr v0.2.0/go.mod h1:Jlz+Sg/XqBQhyMjdDiC+GNNRzZTD7x39Gu3pglZ5oH4= @@ -265,13 +261,15 @@ github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS3 github.com/deepmap/oapi-codegen v1.3.11/go.mod h1:suMvK7+rKlx3+tpa8ByptmvoXbAV70wERKTOGH3hLp0= github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= -github.com/dgraph-io/badger/v3 v3.2103.5 h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg= 
-github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw= +github.com/dgraph-io/badger/v4 v4.7.0 h1:Q+J8HApYAY7UMpL8d9owqiB+odzEc0zn/aqOD9jhc6Y= +github.com/dgraph-io/badger/v4 v4.7.0/go.mod h1:He7TzG3YBy3j4f5baj5B7Zl2XyfNe5bl4Udl0aPemVA= github.com/dgraph-io/ristretto v0.2.0 h1:XAfl+7cmoUDWW/2Lx8TGZQjjxIQ2Ley9DSf52dru4WE= github.com/dgraph-io/ristretto v0.2.0/go.mod h1:8uBHCU/PBV4Ag0CJrP47b9Ofby5dqWNh4FicAdoqFNU= +github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM= +github.com/dgraph-io/ristretto/v2 v2.2.0/go.mod h1:RZrm63UmcBAaYWC1DotLYBmTvgkrs0+XhBd7Npn7/zI= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= -github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38= +github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= @@ -324,8 +322,8 @@ github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHk github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM= github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8= github.com/gdexlab/go-render v1.0.1 h1:rxqB3vo5s4n1kF0ySmoNeSPRYkEsyHgln4jFIQY7v0U= @@ -513,8 +511,8 @@ github.com/gomodule/redigo v1.9.2 h1:HrutZBLhSIU8abiSfW8pj8mPhOyMYjZT/wcA4/L9L9s github.com/gomodule/redigo v1.9.2/go.mod h1:KsU3hiK/Ay8U42qpaJk+kuNa3C+spxapWpM+ywhcgtw= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/flatbuffers v2.0.8+incompatible h1:ivUb1cGomAB101ZM1T0nOiWz9pSrTMoa9+EiY7igmkM= -github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q= +github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -590,8 +588,8 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vb github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M= github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= @@ -873,8 +871,8 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU= github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= -github.com/open-policy-agent/opa v0.70.0 h1:B3cqCN2iQAyKxK6+GI+N40uqkin+wzIrM7YA60t9x1U= -github.com/open-policy-agent/opa v0.70.0/go.mod h1:Y/nm5NY0BX0BqjBriKUiV81sCl8XOjjvqQG7dXrggtI= +github.com/open-policy-agent/opa v1.4.2 h1:ag4upP7zMsa4WE2p1pwAFeG4Pn3mNwfAx9DLhhJfbjU= +github.com/open-policy-agent/opa v1.4.2/go.mod h1:DNzZPKqKh4U0n0ANxcCVlw8lCSv2c+h5G/3QvSYdWZ8= github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -938,8 +936,8 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= -github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= -github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk= +github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -959,8 +957,8 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9 github.com/prometheus/common v0.32.1/go.mod 
h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20170703101242-e645f4e5aaa8/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1051,18 +1049,19 @@ github.com/spacewander/go-suffix-tree v0.0.0-20191010040751-0865e368c784/go.mod github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.4.1/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= -github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= +github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -1086,8 +1085,8 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= 
-github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= -github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= +github.com/tchap/go-patricia/v2 v2.3.2 h1:xTHFutuitO2zqKAQ5rCROYgUb7Or/+IC3fts9/Yc7nM= +github.com/tchap/go-patricia/v2 v2.3.2/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= github.com/test-go/testify v1.1.4 h1:Tf9lntrKUMHiXQ07qBScBTSA0dhYQlu83hswqelv1iE= github.com/test-go/testify v1.1.4/go.mod h1:rH7cfJo/47vWGdi4GPj16x3/t1xGOj2YxzmNQzk2ghU= github.com/thanhpk/randstr v1.0.6 h1:psAOktJFD4vV9NEVb3qkhRSMvYh4ORRaj1+w/hn4B+o= @@ -1174,28 +1173,34 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.57.0 h1:qtFISDHKolvIxzSs0gIaiPUPR0Cucb0F2coHC7ZLdps= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.57.0/go.mod h1:Y+Pop1Q6hCOnETWTW4NROK/q1hv50hM7yDaUTjG8lp8= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94= -go.opentelemetry.io/contrib/zpages v0.57.0 h1:mHFZlTkyrUJcuBhpytPSaVPiVkqri96RKUDk01d83eQ= -go.opentelemetry.io/contrib/zpages v0.57.0/go.mod h1:u/SScNsxj6TacMBA6KCJZjXVC1uwkdVgLFyHrOe0x9M= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= +go.opentelemetry.io/contrib/zpages v0.60.0 h1:wOM9ie1Hz4H88L9KE6GrGbKJhfm+8F1NfW/Y3q9Xt+8= +go.opentelemetry.io/contrib/zpages v0.60.0/go.mod h1:xqfToSRGh2MYUsfyErNz8jnNDPlnpZqWM/y6Z2Cx7xw= +go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/sdk v1.32.0 
h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 h1:m639+BofXTvcY1q8CGs4ItwQarYtJPOWmVobfM1HpVI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0/go.mod h1:LjReUci/F4BUyv+y4dwnq3h/26iNOeC3wAIqgvTIZVo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 h1:xJ2qHD0C1BeYVTLLR9sX12+Qb95kfeD/byKj6Ky1pXg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk= +go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= +go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= +go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= +go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= +go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= +go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= +go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -1347,8 +1352,8 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= -golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.26.0 h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE= +golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1603,12 +1608,12 @@ google.golang.org/genproto 
v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 h1:Df6WuGvthPzc+JiQ/G+m+sNX24kc0aTBqoDN/0yyykE= -google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53/go.mod h1:fheguH3Am2dGp1LfXkrvwqC/KlFq8F0nLq3LryOMrrE= -google.golang.org/genproto/googleapis/api v0.0.0-20241118233622-e639e219e697 h1:pgr/4QbFyktUv9CtQ/Fq4gzEE6/Xs7iCXbktaGzLHbQ= -google.golang.org/genproto/googleapis/api v0.0.0-20241118233622-e639e219e697/go.mod h1:+D9ySVjN8nY8YCVjc5O7PZDIdZporIDY3KaGfJunh88= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697 h1:LWZqQOEjDyONlF1H6afSWpAL/znlREo2tHfLoe+8LMA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= +google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk= +google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc= +google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a h1:nwKuGPlUAt+aR+pcrkfFRrTU1BVrSmYyYMxYbUIVHr0= +google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a/go.mod h1:3kWAYMk1I75K4vykHtKt2ycnOgpA6974V7bREqbsenU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a h1:51aaUVRocpvUOSQKM6Q7VuoaktNIaMCLuhZB6DKksq4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -1624,8 +1629,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0= -google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= +google.golang.org/grpc v1.71.1 h1:ffsFWr7ygTUscGPI0KKK6TLrGz0476KUvvsbqWK0rPI= +google.golang.org/grpc v1.71.1/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= google.golang.org/grpc/examples v0.0.0-20211102180624-670c133e568e h1:m7aQHHqd0q89mRwhwS9Bx2rjyl/hsFAeta+uGrHsQaU= google.golang.org/grpc/examples v0.0.0-20211102180624-670c133e568e/go.mod h1:gID3PKrg7pWKntu9Ss6zTLJ0ttC0X9IHgREOCZwbCVU= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -1642,8 +1647,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod 
h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= -google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/cenkalti/backoff.v1 v1.1.0 h1:Arh75ttbsvlpVA7WtVpH4u9h6Zl46xuptxqLxPiSo4Y= gopkg.in/cenkalti/backoff.v1 v1.1.0/go.mod h1:J6Vskwqd+OMVJl8C33mmtxTBs2gyzfv7UDAkHu8BrjI= diff --git a/vendor/github.com/OneOfOne/xxhash/.gitignore b/vendor/github.com/OneOfOne/xxhash/.gitignore deleted file mode 100644 index f4faa7f8f11..00000000000 --- a/vendor/github.com/OneOfOne/xxhash/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.txt -*.pprof -cmap2/ -cache/ diff --git a/vendor/github.com/OneOfOne/xxhash/.travis.yml b/vendor/github.com/OneOfOne/xxhash/.travis.yml deleted file mode 100644 index 1c6dc55bc73..00000000000 --- a/vendor/github.com/OneOfOne/xxhash/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go -sudo: false - -go: - - "1.10" - - "1.11" - - "1.12" - - master - -script: - - go test -tags safe ./... - - go test ./... - - diff --git a/vendor/github.com/OneOfOne/xxhash/README.md b/vendor/github.com/OneOfOne/xxhash/README.md deleted file mode 100644 index 8eea28c394d..00000000000 --- a/vendor/github.com/OneOfOne/xxhash/README.md +++ /dev/null @@ -1,74 +0,0 @@ -# xxhash [![GoDoc](https://godoc.org/github.com/OneOfOne/xxhash?status.svg)](https://godoc.org/github.com/OneOfOne/xxhash) [![Build Status](https://travis-ci.org/OneOfOne/xxhash.svg?branch=master)](https://travis-ci.org/OneOfOne/xxhash) [![Coverage](https://gocover.io/_badge/github.com/OneOfOne/xxhash)](https://gocover.io/github.com/OneOfOne/xxhash) - -This is a native Go implementation of the excellent [xxhash](https://github.com/Cyan4973/xxHash)* algorithm, an extremely fast non-cryptographic Hash algorithm, working at speeds close to RAM limits. - -* The C implementation is ([Copyright](https://github.com/Cyan4973/xxHash/blob/master/LICENSE) (c) 2012-2014, Yann Collet) - -## Install - - go get github.com/OneOfOne/xxhash - -## Features - -* On Go 1.7+ the pure go version is faster than CGO for all inputs. -* Supports ChecksumString{32,64} xxhash{32,64}.WriteString, which uses no copies when it can, falls back to copy on appengine. -* The native version falls back to a less optimized version on appengine due to the lack of unsafe. -* Almost as fast as the mostly pure assembly version written by the brilliant [cespare](https://github.com/cespare/xxhash), while also supporting seeds. -* To manually toggle the appengine version build with `-tags safe`. 
- -## Benchmark - -### Core i7-4790 @ 3.60GHz, Linux 4.12.6-1-ARCH (64bit), Go tip (+ff90f4af66 2017-08-19) - -```bash -➤ go test -bench '64' -count 5 -tags cespare | benchstat /dev/stdin -name time/op - -# https://github.com/cespare/xxhash -XXSum64Cespare/Func-8 160ns ± 2% -XXSum64Cespare/Struct-8 173ns ± 1% -XXSum64ShortCespare/Func-8 6.78ns ± 1% -XXSum64ShortCespare/Struct-8 19.6ns ± 2% - -# this package (default mode, using unsafe) -XXSum64/Func-8 170ns ± 1% -XXSum64/Struct-8 182ns ± 1% -XXSum64Short/Func-8 13.5ns ± 3% -XXSum64Short/Struct-8 20.4ns ± 0% - -# this package (appengine, *not* using unsafe) -XXSum64/Func-8 241ns ± 5% -XXSum64/Struct-8 243ns ± 6% -XXSum64Short/Func-8 15.2ns ± 2% -XXSum64Short/Struct-8 23.7ns ± 5% - -CRC64ISO-8 1.23µs ± 1% -CRC64ISOString-8 2.71µs ± 4% -CRC64ISOShort-8 22.2ns ± 3% - -Fnv64-8 2.34µs ± 1% -Fnv64Short-8 74.7ns ± 8% -``` - -## Usage - -```go - h := xxhash.New64() - // r, err := os.Open("......") - // defer f.Close() - r := strings.NewReader(F) - io.Copy(h, r) - fmt.Println("xxhash.Backend:", xxhash.Backend) - fmt.Println("File checksum:", h.Sum64()) -``` - -[playground](https://play.golang.org/p/wHKBwfu6CPV) - -## TODO - -* Rewrite the 32bit version to be more optimized. -* General cleanup as the Go inliner gets smarter. - -## License - -This project is released under the Apache v2. license. See [LICENSE](LICENSE) for more details. diff --git a/vendor/github.com/OneOfOne/xxhash/xxhash.go b/vendor/github.com/OneOfOne/xxhash/xxhash.go deleted file mode 100644 index af2496b77f7..00000000000 --- a/vendor/github.com/OneOfOne/xxhash/xxhash.go +++ /dev/null @@ -1,294 +0,0 @@ -package xxhash - -import ( - "encoding/binary" - "errors" - "hash" -) - -const ( - prime32x1 uint32 = 2654435761 - prime32x2 uint32 = 2246822519 - prime32x3 uint32 = 3266489917 - prime32x4 uint32 = 668265263 - prime32x5 uint32 = 374761393 - - prime64x1 uint64 = 11400714785074694791 - prime64x2 uint64 = 14029467366897019727 - prime64x3 uint64 = 1609587929392839161 - prime64x4 uint64 = 9650029242287828579 - prime64x5 uint64 = 2870177450012600261 - - maxInt32 int32 = (1<<31 - 1) - - // precomputed zero Vs for seed 0 - zero64x1 = 0x60ea27eeadc0b5d6 - zero64x2 = 0xc2b2ae3d27d4eb4f - zero64x3 = 0x0 - zero64x4 = 0x61c8864e7a143579 -) - -const ( - magic32 = "xxh\x07" - magic64 = "xxh\x08" - marshaled32Size = len(magic32) + 4*7 + 16 - marshaled64Size = len(magic64) + 8*6 + 32 + 1 -) - -func NewHash32() hash.Hash { return New32() } -func NewHash64() hash.Hash { return New64() } - -// Checksum32 returns the checksum of the input data with the seed set to 0. -func Checksum32(in []byte) uint32 { - return Checksum32S(in, 0) -} - -// ChecksumString32 returns the checksum of the input data, without creating a copy, with the seed set to 0. -func ChecksumString32(s string) uint32 { - return ChecksumString32S(s, 0) -} - -type XXHash32 struct { - mem [16]byte - ln, memIdx int32 - v1, v2, v3, v4 uint32 - seed uint32 -} - -// Size returns the number of bytes Sum will return. -func (xx *XXHash32) Size() int { - return 4 -} - -// BlockSize returns the hash's underlying block size. -// The Write method must be able to accept any amount -// of data, but it may operate more efficiently if all writes -// are a multiple of the block size. -func (xx *XXHash32) BlockSize() int { - return 16 -} - -// NewS32 creates a new hash.Hash32 computing the 32bit xxHash checksum starting with the specific seed. 
-func NewS32(seed uint32) (xx *XXHash32) { - xx = &XXHash32{ - seed: seed, - } - xx.Reset() - return -} - -// New32 creates a new hash.Hash32 computing the 32bit xxHash checksum starting with the seed set to 0. -func New32() *XXHash32 { - return NewS32(0) -} - -func (xx *XXHash32) Reset() { - xx.v1 = xx.seed + prime32x1 + prime32x2 - xx.v2 = xx.seed + prime32x2 - xx.v3 = xx.seed - xx.v4 = xx.seed - prime32x1 - xx.ln, xx.memIdx = 0, 0 -} - -// Sum appends the current hash to b and returns the resulting slice. -// It does not change the underlying hash state. -func (xx *XXHash32) Sum(in []byte) []byte { - s := xx.Sum32() - return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (xx *XXHash32) MarshalBinary() ([]byte, error) { - b := make([]byte, 0, marshaled32Size) - b = append(b, magic32...) - b = appendUint32(b, xx.v1) - b = appendUint32(b, xx.v2) - b = appendUint32(b, xx.v3) - b = appendUint32(b, xx.v4) - b = appendUint32(b, xx.seed) - b = appendInt32(b, xx.ln) - b = appendInt32(b, xx.memIdx) - b = append(b, xx.mem[:]...) - return b, nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -func (xx *XXHash32) UnmarshalBinary(b []byte) error { - if len(b) < len(magic32) || string(b[:len(magic32)]) != magic32 { - return errors.New("xxhash: invalid hash state identifier") - } - if len(b) != marshaled32Size { - return errors.New("xxhash: invalid hash state size") - } - b = b[len(magic32):] - b, xx.v1 = consumeUint32(b) - b, xx.v2 = consumeUint32(b) - b, xx.v3 = consumeUint32(b) - b, xx.v4 = consumeUint32(b) - b, xx.seed = consumeUint32(b) - b, xx.ln = consumeInt32(b) - b, xx.memIdx = consumeInt32(b) - copy(xx.mem[:], b) - return nil -} - -// Checksum64 an alias for Checksum64S(in, 0) -func Checksum64(in []byte) uint64 { - return Checksum64S(in, 0) -} - -// ChecksumString64 returns the checksum of the input data, without creating a copy, with the seed set to 0. -func ChecksumString64(s string) uint64 { - return ChecksumString64S(s, 0) -} - -type XXHash64 struct { - v1, v2, v3, v4 uint64 - seed uint64 - ln uint64 - mem [32]byte - memIdx int8 -} - -// Size returns the number of bytes Sum will return. -func (xx *XXHash64) Size() int { - return 8 -} - -// BlockSize returns the hash's underlying block size. -// The Write method must be able to accept any amount -// of data, but it may operate more efficiently if all writes -// are a multiple of the block size. -func (xx *XXHash64) BlockSize() int { - return 32 -} - -// NewS64 creates a new hash.Hash64 computing the 64bit xxHash checksum starting with the specific seed. -func NewS64(seed uint64) (xx *XXHash64) { - xx = &XXHash64{ - seed: seed, - } - xx.Reset() - return -} - -// New64 creates a new hash.Hash64 computing the 64bit xxHash checksum starting with the seed set to 0x0. -func New64() *XXHash64 { - return NewS64(0) -} - -func (xx *XXHash64) Reset() { - xx.ln, xx.memIdx = 0, 0 - xx.v1, xx.v2, xx.v3, xx.v4 = resetVs64(xx.seed) -} - -// Sum appends the current hash to b and returns the resulting slice. -// It does not change the underlying hash state. -func (xx *XXHash64) Sum(in []byte) []byte { - s := xx.Sum64() - return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (xx *XXHash64) MarshalBinary() ([]byte, error) { - b := make([]byte, 0, marshaled64Size) - b = append(b, magic64...) 
- b = appendUint64(b, xx.v1) - b = appendUint64(b, xx.v2) - b = appendUint64(b, xx.v3) - b = appendUint64(b, xx.v4) - b = appendUint64(b, xx.seed) - b = appendUint64(b, xx.ln) - b = append(b, byte(xx.memIdx)) - b = append(b, xx.mem[:]...) - return b, nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -func (xx *XXHash64) UnmarshalBinary(b []byte) error { - if len(b) < len(magic64) || string(b[:len(magic64)]) != magic64 { - return errors.New("xxhash: invalid hash state identifier") - } - if len(b) != marshaled64Size { - return errors.New("xxhash: invalid hash state size") - } - b = b[len(magic64):] - b, xx.v1 = consumeUint64(b) - b, xx.v2 = consumeUint64(b) - b, xx.v3 = consumeUint64(b) - b, xx.v4 = consumeUint64(b) - b, xx.seed = consumeUint64(b) - b, xx.ln = consumeUint64(b) - xx.memIdx = int8(b[0]) - b = b[1:] - copy(xx.mem[:], b) - return nil -} - -func appendInt32(b []byte, x int32) []byte { return appendUint32(b, uint32(x)) } - -func appendUint32(b []byte, x uint32) []byte { - var a [4]byte - binary.LittleEndian.PutUint32(a[:], x) - return append(b, a[:]...) -} - -func appendUint64(b []byte, x uint64) []byte { - var a [8]byte - binary.LittleEndian.PutUint64(a[:], x) - return append(b, a[:]...) -} - -func consumeInt32(b []byte) ([]byte, int32) { bn, x := consumeUint32(b); return bn, int32(x) } -func consumeUint32(b []byte) ([]byte, uint32) { x := u32(b); return b[4:], x } -func consumeUint64(b []byte) ([]byte, uint64) { x := u64(b); return b[8:], x } - -// force the compiler to use ROTL instructions - -func rotl32_1(x uint32) uint32 { return (x << 1) | (x >> (32 - 1)) } -func rotl32_7(x uint32) uint32 { return (x << 7) | (x >> (32 - 7)) } -func rotl32_11(x uint32) uint32 { return (x << 11) | (x >> (32 - 11)) } -func rotl32_12(x uint32) uint32 { return (x << 12) | (x >> (32 - 12)) } -func rotl32_13(x uint32) uint32 { return (x << 13) | (x >> (32 - 13)) } -func rotl32_17(x uint32) uint32 { return (x << 17) | (x >> (32 - 17)) } -func rotl32_18(x uint32) uint32 { return (x << 18) | (x >> (32 - 18)) } - -func rotl64_1(x uint64) uint64 { return (x << 1) | (x >> (64 - 1)) } -func rotl64_7(x uint64) uint64 { return (x << 7) | (x >> (64 - 7)) } -func rotl64_11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) } -func rotl64_12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) } -func rotl64_18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) } -func rotl64_23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) } -func rotl64_27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) } -func rotl64_31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) } - -func mix64(h uint64) uint64 { - h ^= h >> 33 - h *= prime64x2 - h ^= h >> 29 - h *= prime64x3 - h ^= h >> 32 - return h -} - -func resetVs64(seed uint64) (v1, v2, v3, v4 uint64) { - if seed == 0 { - return zero64x1, zero64x2, zero64x3, zero64x4 - } - return (seed + prime64x1 + prime64x2), (seed + prime64x2), (seed), (seed - prime64x1) -} - -// borrowed from cespare -func round64(h, v uint64) uint64 { - h += v * prime64x2 - h = rotl64_31(h) - h *= prime64x1 - return h -} - -func mergeRound64(h, v uint64) uint64 { - v = round64(0, v) - h ^= v - h = h*prime64x1 + prime64x4 - return h -} diff --git a/vendor/github.com/OneOfOne/xxhash/xxhash_go17.go b/vendor/github.com/OneOfOne/xxhash/xxhash_go17.go deleted file mode 100644 index ae48e0c5ca7..00000000000 --- a/vendor/github.com/OneOfOne/xxhash/xxhash_go17.go +++ /dev/null @@ -1,161 +0,0 @@ -package xxhash - -func u32(in []byte) uint32 { - return 
uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 -} - -func u64(in []byte) uint64 { - return uint64(in[0]) | uint64(in[1])<<8 | uint64(in[2])<<16 | uint64(in[3])<<24 | uint64(in[4])<<32 | uint64(in[5])<<40 | uint64(in[6])<<48 | uint64(in[7])<<56 -} - -// Checksum32S returns the checksum of the input bytes with the specific seed. -func Checksum32S(in []byte, seed uint32) (h uint32) { - var i int - - if len(in) > 15 { - var ( - v1 = seed + prime32x1 + prime32x2 - v2 = seed + prime32x2 - v3 = seed + 0 - v4 = seed - prime32x1 - ) - for ; i < len(in)-15; i += 16 { - in := in[i : i+16 : len(in)] - v1 += u32(in[0:4:len(in)]) * prime32x2 - v1 = rotl32_13(v1) * prime32x1 - - v2 += u32(in[4:8:len(in)]) * prime32x2 - v2 = rotl32_13(v2) * prime32x1 - - v3 += u32(in[8:12:len(in)]) * prime32x2 - v3 = rotl32_13(v3) * prime32x1 - - v4 += u32(in[12:16:len(in)]) * prime32x2 - v4 = rotl32_13(v4) * prime32x1 - } - - h = rotl32_1(v1) + rotl32_7(v2) + rotl32_12(v3) + rotl32_18(v4) - - } else { - h = seed + prime32x5 - } - - h += uint32(len(in)) - for ; i <= len(in)-4; i += 4 { - in := in[i : i+4 : len(in)] - h += u32(in[0:4:len(in)]) * prime32x3 - h = rotl32_17(h) * prime32x4 - } - - for ; i < len(in); i++ { - h += uint32(in[i]) * prime32x5 - h = rotl32_11(h) * prime32x1 - } - - h ^= h >> 15 - h *= prime32x2 - h ^= h >> 13 - h *= prime32x3 - h ^= h >> 16 - - return -} - -func (xx *XXHash32) Write(in []byte) (n int, err error) { - i, ml := 0, int(xx.memIdx) - n = len(in) - xx.ln += int32(n) - - if d := 16 - ml; ml > 0 && ml+len(in) > 16 { - xx.memIdx += int32(copy(xx.mem[xx.memIdx:], in[:d])) - ml, in = 16, in[d:len(in):len(in)] - } else if ml+len(in) < 16 { - xx.memIdx += int32(copy(xx.mem[xx.memIdx:], in)) - return - } - - if ml > 0 { - i += 16 - ml - xx.memIdx += int32(copy(xx.mem[xx.memIdx:len(xx.mem):len(xx.mem)], in)) - in := xx.mem[:16:len(xx.mem)] - - xx.v1 += u32(in[0:4:len(in)]) * prime32x2 - xx.v1 = rotl32_13(xx.v1) * prime32x1 - - xx.v2 += u32(in[4:8:len(in)]) * prime32x2 - xx.v2 = rotl32_13(xx.v2) * prime32x1 - - xx.v3 += u32(in[8:12:len(in)]) * prime32x2 - xx.v3 = rotl32_13(xx.v3) * prime32x1 - - xx.v4 += u32(in[12:16:len(in)]) * prime32x2 - xx.v4 = rotl32_13(xx.v4) * prime32x1 - - xx.memIdx = 0 - } - - for ; i <= len(in)-16; i += 16 { - in := in[i : i+16 : len(in)] - xx.v1 += u32(in[0:4:len(in)]) * prime32x2 - xx.v1 = rotl32_13(xx.v1) * prime32x1 - - xx.v2 += u32(in[4:8:len(in)]) * prime32x2 - xx.v2 = rotl32_13(xx.v2) * prime32x1 - - xx.v3 += u32(in[8:12:len(in)]) * prime32x2 - xx.v3 = rotl32_13(xx.v3) * prime32x1 - - xx.v4 += u32(in[12:16:len(in)]) * prime32x2 - xx.v4 = rotl32_13(xx.v4) * prime32x1 - } - - if len(in)-i != 0 { - xx.memIdx += int32(copy(xx.mem[xx.memIdx:], in[i:len(in):len(in)])) - } - - return -} - -func (xx *XXHash32) Sum32() (h uint32) { - var i int32 - if xx.ln > 15 { - h = rotl32_1(xx.v1) + rotl32_7(xx.v2) + rotl32_12(xx.v3) + rotl32_18(xx.v4) - } else { - h = xx.seed + prime32x5 - } - - h += uint32(xx.ln) - - if xx.memIdx > 0 { - for ; i < xx.memIdx-3; i += 4 { - in := xx.mem[i : i+4 : len(xx.mem)] - h += u32(in[0:4:len(in)]) * prime32x3 - h = rotl32_17(h) * prime32x4 - } - - for ; i < xx.memIdx; i++ { - h += uint32(xx.mem[i]) * prime32x5 - h = rotl32_11(h) * prime32x1 - } - } - h ^= h >> 15 - h *= prime32x2 - h ^= h >> 13 - h *= prime32x3 - h ^= h >> 16 - - return -} - -// Checksum64S returns the 64bit xxhash checksum for a single input -func Checksum64S(in []byte, seed uint64) uint64 { - if len(in) == 0 && seed == 0 { - return 0xef46db3751d8e999 - } 
- - if len(in) > 31 { - return checksum64(in, seed) - } - - return checksum64Short(in, seed) -} diff --git a/vendor/github.com/OneOfOne/xxhash/xxhash_safe.go b/vendor/github.com/OneOfOne/xxhash/xxhash_safe.go deleted file mode 100644 index e92ec29e025..00000000000 --- a/vendor/github.com/OneOfOne/xxhash/xxhash_safe.go +++ /dev/null @@ -1,183 +0,0 @@ -// +build appengine safe ppc64le ppc64be mipsle mips s390x - -package xxhash - -// Backend returns the current version of xxhash being used. -const Backend = "GoSafe" - -func ChecksumString32S(s string, seed uint32) uint32 { - return Checksum32S([]byte(s), seed) -} - -func (xx *XXHash32) WriteString(s string) (int, error) { - if len(s) == 0 { - return 0, nil - } - return xx.Write([]byte(s)) -} - -func ChecksumString64S(s string, seed uint64) uint64 { - return Checksum64S([]byte(s), seed) -} - -func (xx *XXHash64) WriteString(s string) (int, error) { - if len(s) == 0 { - return 0, nil - } - return xx.Write([]byte(s)) -} - -func checksum64(in []byte, seed uint64) (h uint64) { - var ( - v1, v2, v3, v4 = resetVs64(seed) - - i int - ) - - for ; i < len(in)-31; i += 32 { - in := in[i : i+32 : len(in)] - v1 = round64(v1, u64(in[0:8:len(in)])) - v2 = round64(v2, u64(in[8:16:len(in)])) - v3 = round64(v3, u64(in[16:24:len(in)])) - v4 = round64(v4, u64(in[24:32:len(in)])) - } - - h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4) - - h = mergeRound64(h, v1) - h = mergeRound64(h, v2) - h = mergeRound64(h, v3) - h = mergeRound64(h, v4) - - h += uint64(len(in)) - - for ; i < len(in)-7; i += 8 { - h ^= round64(0, u64(in[i:len(in):len(in)])) - h = rotl64_27(h)*prime64x1 + prime64x4 - } - - for ; i < len(in)-3; i += 4 { - h ^= uint64(u32(in[i:len(in):len(in)])) * prime64x1 - h = rotl64_23(h)*prime64x2 + prime64x3 - } - - for ; i < len(in); i++ { - h ^= uint64(in[i]) * prime64x5 - h = rotl64_11(h) * prime64x1 - } - - return mix64(h) -} - -func checksum64Short(in []byte, seed uint64) uint64 { - var ( - h = seed + prime64x5 + uint64(len(in)) - i int - ) - - for ; i < len(in)-7; i += 8 { - k := u64(in[i : i+8 : len(in)]) - h ^= round64(0, k) - h = rotl64_27(h)*prime64x1 + prime64x4 - } - - for ; i < len(in)-3; i += 4 { - h ^= uint64(u32(in[i:i+4:len(in)])) * prime64x1 - h = rotl64_23(h)*prime64x2 + prime64x3 - } - - for ; i < len(in); i++ { - h ^= uint64(in[i]) * prime64x5 - h = rotl64_11(h) * prime64x1 - } - - return mix64(h) -} - -func (xx *XXHash64) Write(in []byte) (n int, err error) { - var ( - ml = int(xx.memIdx) - d = 32 - ml - ) - - n = len(in) - xx.ln += uint64(n) - - if ml+len(in) < 32 { - xx.memIdx += int8(copy(xx.mem[xx.memIdx:len(xx.mem):len(xx.mem)], in)) - return - } - - i, v1, v2, v3, v4 := 0, xx.v1, xx.v2, xx.v3, xx.v4 - if ml > 0 && ml+len(in) > 32 { - xx.memIdx += int8(copy(xx.mem[xx.memIdx:len(xx.mem):len(xx.mem)], in[:d:len(in)])) - in = in[d:len(in):len(in)] - - in := xx.mem[0:32:len(xx.mem)] - - v1 = round64(v1, u64(in[0:8:len(in)])) - v2 = round64(v2, u64(in[8:16:len(in)])) - v3 = round64(v3, u64(in[16:24:len(in)])) - v4 = round64(v4, u64(in[24:32:len(in)])) - - xx.memIdx = 0 - } - - for ; i < len(in)-31; i += 32 { - in := in[i : i+32 : len(in)] - v1 = round64(v1, u64(in[0:8:len(in)])) - v2 = round64(v2, u64(in[8:16:len(in)])) - v3 = round64(v3, u64(in[16:24:len(in)])) - v4 = round64(v4, u64(in[24:32:len(in)])) - } - - if len(in)-i != 0 { - xx.memIdx += int8(copy(xx.mem[xx.memIdx:], in[i:len(in):len(in)])) - } - - xx.v1, xx.v2, xx.v3, xx.v4 = v1, v2, v3, v4 - - return -} - -func (xx *XXHash64) Sum64() (h uint64) { - var i int - 
if xx.ln > 31 { - v1, v2, v3, v4 := xx.v1, xx.v2, xx.v3, xx.v4 - h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4) - - h = mergeRound64(h, v1) - h = mergeRound64(h, v2) - h = mergeRound64(h, v3) - h = mergeRound64(h, v4) - } else { - h = xx.seed + prime64x5 - } - - h += uint64(xx.ln) - if xx.memIdx > 0 { - in := xx.mem[:xx.memIdx] - for ; i < int(xx.memIdx)-7; i += 8 { - in := in[i : i+8 : len(in)] - k := u64(in[0:8:len(in)]) - k *= prime64x2 - k = rotl64_31(k) - k *= prime64x1 - h ^= k - h = rotl64_27(h)*prime64x1 + prime64x4 - } - - for ; i < int(xx.memIdx)-3; i += 4 { - in := in[i : i+4 : len(in)] - h ^= uint64(u32(in[0:4:len(in)])) * prime64x1 - h = rotl64_23(h)*prime64x2 + prime64x3 - } - - for ; i < int(xx.memIdx); i++ { - h ^= uint64(in[i]) * prime64x5 - h = rotl64_11(h) * prime64x1 - } - } - - return mix64(h) -} diff --git a/vendor/github.com/OneOfOne/xxhash/xxhash_unsafe.go b/vendor/github.com/OneOfOne/xxhash/xxhash_unsafe.go deleted file mode 100644 index 1e2b5e8f1ff..00000000000 --- a/vendor/github.com/OneOfOne/xxhash/xxhash_unsafe.go +++ /dev/null @@ -1,240 +0,0 @@ -// +build !safe -// +build !appengine -// +build !ppc64le -// +build !mipsle -// +build !ppc64be -// +build !mips -// +build !s390x - -package xxhash - -import ( - "reflect" - "unsafe" -) - -// Backend returns the current version of xxhash being used. -const Backend = "GoUnsafe" - -// ChecksumString32S returns the checksum of the input data, without creating a copy, with the specific seed. -func ChecksumString32S(s string, seed uint32) uint32 { - if len(s) == 0 { - return Checksum32S(nil, seed) - } - ss := (*reflect.StringHeader)(unsafe.Pointer(&s)) - return Checksum32S((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)], seed) -} - -func (xx *XXHash32) WriteString(s string) (int, error) { - if len(s) == 0 { - return 0, nil - } - - ss := (*reflect.StringHeader)(unsafe.Pointer(&s)) - return xx.Write((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)]) -} - -// ChecksumString64S returns the checksum of the input data, without creating a copy, with the specific seed. 
-func ChecksumString64S(s string, seed uint64) uint64 { - if len(s) == 0 { - return Checksum64S(nil, seed) - } - - ss := (*reflect.StringHeader)(unsafe.Pointer(&s)) - return Checksum64S((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)], seed) -} - -func (xx *XXHash64) WriteString(s string) (int, error) { - if len(s) == 0 { - return 0, nil - } - ss := (*reflect.StringHeader)(unsafe.Pointer(&s)) - return xx.Write((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)]) -} - -//go:nocheckptr -func checksum64(in []byte, seed uint64) uint64 { - var ( - wordsLen = len(in) >> 3 - words = ((*[maxInt32 / 8]uint64)(unsafe.Pointer(&in[0])))[:wordsLen:wordsLen] - - v1, v2, v3, v4 = resetVs64(seed) - - h uint64 - i int - ) - - for ; i < len(words)-3; i += 4 { - words := (*[4]uint64)(unsafe.Pointer(&words[i])) - - v1 = round64(v1, words[0]) - v2 = round64(v2, words[1]) - v3 = round64(v3, words[2]) - v4 = round64(v4, words[3]) - } - - h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4) - - h = mergeRound64(h, v1) - h = mergeRound64(h, v2) - h = mergeRound64(h, v3) - h = mergeRound64(h, v4) - - h += uint64(len(in)) - - for _, k := range words[i:] { - h ^= round64(0, k) - h = rotl64_27(h)*prime64x1 + prime64x4 - } - - if in = in[wordsLen<<3 : len(in) : len(in)]; len(in) > 3 { - words := (*[1]uint32)(unsafe.Pointer(&in[0])) - h ^= uint64(words[0]) * prime64x1 - h = rotl64_23(h)*prime64x2 + prime64x3 - - in = in[4:len(in):len(in)] - } - - for _, b := range in { - h ^= uint64(b) * prime64x5 - h = rotl64_11(h) * prime64x1 - } - - return mix64(h) -} - -//go:nocheckptr -func checksum64Short(in []byte, seed uint64) uint64 { - var ( - h = seed + prime64x5 + uint64(len(in)) - i int - ) - - if len(in) > 7 { - var ( - wordsLen = len(in) >> 3 - words = ((*[maxInt32 / 8]uint64)(unsafe.Pointer(&in[0])))[:wordsLen:wordsLen] - ) - - for i := range words { - h ^= round64(0, words[i]) - h = rotl64_27(h)*prime64x1 + prime64x4 - } - - i = wordsLen << 3 - } - - if in = in[i:len(in):len(in)]; len(in) > 3 { - words := (*[1]uint32)(unsafe.Pointer(&in[0])) - h ^= uint64(words[0]) * prime64x1 - h = rotl64_23(h)*prime64x2 + prime64x3 - - in = in[4:len(in):len(in)] - } - - for _, b := range in { - h ^= uint64(b) * prime64x5 - h = rotl64_11(h) * prime64x1 - } - - return mix64(h) -} - -func (xx *XXHash64) Write(in []byte) (n int, err error) { - mem, idx := xx.mem[:], int(xx.memIdx) - - xx.ln, n = xx.ln+uint64(len(in)), len(in) - - if idx+len(in) < 32 { - xx.memIdx += int8(copy(mem[idx:len(mem):len(mem)], in)) - return - } - - var ( - v1, v2, v3, v4 = xx.v1, xx.v2, xx.v3, xx.v4 - - i int - ) - - if d := 32 - int(idx); d > 0 && int(idx)+len(in) > 31 { - copy(mem[idx:len(mem):len(mem)], in[:len(in):len(in)]) - - words := (*[4]uint64)(unsafe.Pointer(&mem[0])) - - v1 = round64(v1, words[0]) - v2 = round64(v2, words[1]) - v3 = round64(v3, words[2]) - v4 = round64(v4, words[3]) - - if in, xx.memIdx = in[d:len(in):len(in)], 0; len(in) == 0 { - goto RET - } - } - - for ; i < len(in)-31; i += 32 { - words := (*[4]uint64)(unsafe.Pointer(&in[i])) - - v1 = round64(v1, words[0]) - v2 = round64(v2, words[1]) - v3 = round64(v3, words[2]) - v4 = round64(v4, words[3]) - } - - if len(in)-i != 0 { - xx.memIdx += int8(copy(mem[xx.memIdx:len(mem):len(mem)], in[i:len(in):len(in)])) - } - -RET: - xx.v1, xx.v2, xx.v3, xx.v4 = v1, v2, v3, v4 - - return -} - -func (xx *XXHash64) Sum64() (h uint64) { - if seed := xx.seed; xx.ln > 31 { - v1, v2, v3, v4 := xx.v1, xx.v2, xx.v3, xx.v4 - h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + 
rotl64_18(v4) - - h = mergeRound64(h, v1) - h = mergeRound64(h, v2) - h = mergeRound64(h, v3) - h = mergeRound64(h, v4) - } else if seed == 0 { - h = prime64x5 - } else { - h = seed + prime64x5 - } - - h += uint64(xx.ln) - - if xx.memIdx == 0 { - return mix64(h) - } - - var ( - in = xx.mem[:xx.memIdx:xx.memIdx] - wordsLen = len(in) >> 3 - words = ((*[maxInt32 / 8]uint64)(unsafe.Pointer(&in[0])))[:wordsLen:wordsLen] - ) - - for _, k := range words { - h ^= round64(0, k) - h = rotl64_27(h)*prime64x1 + prime64x4 - } - - if in = in[wordsLen<<3 : len(in) : len(in)]; len(in) > 3 { - words := (*[1]uint32)(unsafe.Pointer(&in[0])) - - h ^= uint64(words[0]) * prime64x1 - h = rotl64_23(h)*prime64x2 + prime64x3 - - in = in[4:len(in):len(in)] - } - - for _, b := range in { - h ^= uint64(b) * prime64x5 - h = rotl64_11(h) * prime64x1 - } - - return mix64(h) -} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go index 9d6c473fdc0..96a80c99b8a 100644 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go @@ -104,7 +104,7 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering node.Parent.Prev.Type == blackfriday.Heading && node.Parent.Prev.FirstChild != nil && bytes.EqualFold(node.Parent.Prev.FirstChild.Literal, []byte("NAME")) { - before, after, found := bytes.Cut(node.Literal, []byte(" - ")) + before, after, found := bytesCut(node.Literal, []byte(" - ")) escapeSpecialChars(w, before) if found { out(w, ` \- `) @@ -406,3 +406,12 @@ func escapeSpecialCharsLine(w io.Writer, text []byte) { w.Write([]byte{'\\', text[i]}) // nolint: errcheck } } + +// bytesCut is a copy of [bytes.Cut] to provide compatibility with go1.17 +// and older. We can remove this once we drop support for go1.17 and older. +func bytesCut(s, sep []byte) (before, after []byte, found bool) { + if i := bytes.Index(s, sep); i >= 0 { + return s[:i], s[i+len(sep):], true + } + return s, nil, false +} diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml index ffc7b992b3c..f4e7dbf37b3 100644 --- a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml +++ b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml @@ -1,7 +1,7 @@ freebsd_task: name: 'FreeBSD' freebsd_instance: - image_family: freebsd-13-2 + image_family: freebsd-14-1 install_script: - pkg update -f - pkg install -y go @@ -9,5 +9,6 @@ freebsd_task: # run tests as user "cirrus" instead of root - pw useradd cirrus -m - chown -R cirrus:cirrus . - - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... - - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./... 
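The roff.go hunk above backfills bytes.Cut as a local bytesCut shim so the vendored go-md2man keeps building on Go 1.17 and older, per the comment in the hunk itself. A minimal standalone sketch of what the shim does (the NAME-heading input below is a made-up example, not taken from the patch):

```go
package main

import (
	"bytes"
	"fmt"
)

// bytesCut is the shim from the hunk above: it splits s around the first
// occurrence of sep, matching the behaviour of bytes.Cut from Go 1.18+.
func bytesCut(s, sep []byte) (before, after []byte, found bool) {
	if i := bytes.Index(s, sep); i >= 0 {
		return s[:i], s[i+len(sep):], true
	}
	return s, nil, false
}

func main() {
	// roff.go calls this with " - " to split a man-page NAME heading into
	// the name and its one-line description (this input is hypothetical).
	name, desc, ok := bytesCut([]byte("fsnotify - file system notifications"), []byte(" - "))
	fmt.Printf("%s | %s | %v\n", name, desc, ok) // fsnotify | file system notifications | true
}
```

When the separator is absent, before is the whole input and found is false; in the hunk that means only the escaped heading is written and the ` \- ` separator is skipped.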
diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes deleted file mode 100644 index 32f1001be0a..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore index 391cc076b12..daea9dd6d6d 100644 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -5,3 +5,6 @@ # Output of go build ./cmd/fsnotify /fsnotify /fsnotify.exe + +/test/kqueue +/test/a.out diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index e0e57575496..fa854785d0f 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,8 +1,36 @@ # Changelog -Unreleased ---------- -Nothing yet. +1.8.0 2024-10-31 +---------------- + +### Additions + +- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619]) + +### Changes and fixes + +- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610]) + +- kqueue: ignore events with Ident=0 ([#590]) + +- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617]) + +- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625]) + +- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620]) + +- inotify: fix panic when calling Remove() in a goroutine ([#650]) + +- fen: allow watching subdirectories of watched directories ([#621]) + +[#590]: https://github.com/fsnotify/fsnotify/pull/590 +[#610]: https://github.com/fsnotify/fsnotify/pull/610 +[#617]: https://github.com/fsnotify/fsnotify/pull/617 +[#619]: https://github.com/fsnotify/fsnotify/pull/619 +[#620]: https://github.com/fsnotify/fsnotify/pull/620 +[#621]: https://github.com/fsnotify/fsnotify/pull/621 +[#625]: https://github.com/fsnotify/fsnotify/pull/625 +[#650]: https://github.com/fsnotify/fsnotify/pull/650 1.7.0 - 2023-10-22 ------------------ diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index ea379759d51..e4ac2a2fffd 100644 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -1,7 +1,7 @@ Thank you for your interest in contributing to fsnotify! We try to review and merge PRs in a reasonable timeframe, but please be aware that: -- To avoid "wasted" work, please discus changes on the issue tracker first. You +- To avoid "wasted" work, please discuss changes on the issue tracker first. You can just send PRs, but they may end up being rejected for one reason or the other. @@ -20,6 +20,124 @@ platforms. Testing different platforms locally can be done with something like Use the `-short` flag to make the "stress test" run faster. + +Writing new tests +----------------- +Scripts in the testdata directory allow creating test cases in a "shell-like" +syntax. The basic format is: + + script + + Output: + desired output + +For example: + + # Create a new empty file with some data. + watch / + echo data >/file + + Output: + create /file + write /file + +Just create a new file to add a new test; select which tests to run with +`-run TestScript/[path]`.
+ +script +------ +The script is a "shell-like" script: + + cmd arg arg + +Comments are supported with `#`: + + # Comment + cmd arg arg # Comment + +All operations are done in a temp directory; a path like "/foo" is rewritten to +"/tmp/TestFoo/foo". + +Arguments can be quoted with `"` or `'`; there are no escapes and they're +functionally identical right now, but this may change in the future, so best to +assume shell-like rules. + + touch "/file with spaces" + +End-of-line escapes with `\` are not supported. + +### Supported commands + + watch path [ops] # Watch the path, reporting events for it. Nothing is + # watched by default. Optionally a list of ops can be + # given, as with AddWith(path, WithOps(...)). + unwatch path # Stop watching the path. + watchlist n # Assert watchlist length. + + stop # Stop running the script; for debugging. + debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in + parallel by default, so -parallel=1 is probably a good + idea). + + touch path + mkdir [-p] dir + ln -s target link # Only ln -s supported. + mkfifo path + mknod dev path + mv src dst + rm [-r] path + chmod mode path # Octal only + sleep time-in-ms + + cat path # Read path (does nothing with the data; just reads it). + echo str >>path # Append "str" to "path". + echo str >path # Truncate "path" and write "str". + + require reason # Skip the test if "reason" is true; "skip" and + skip reason # "require" behave identical; it supports both for + # readability. Possible reasons are: + # + # always Always skip this test. + # symlink Symlinks are supported (requires admin + # permissions on Windows). + # mkfifo Platform doesn't support FIFO named sockets. + # mknod Platform doesn't support device nodes. + + +output +------ +After `Output:` the desired output is given; this is indented by convention, but +that's not required. + +The format of that is: + + # Comment + event path # Comment + + system: + event path + system2: + event path + +Every event is one line, and any whitespace between the event and path are +ignored. The path can optionally be surrounded in ". Anything after a "#" is +ignored. + +Platform-specific tests can be added after GOOS; for example: + + watch / + touch /file + + Output: + # Tested if nothing else matches + create /file + + # Windows-specific test. + windows: + write /file + +You can specify multiple platforms with a comma (e.g. "windows, linux:"). +"kqueue" is a shortcut for all kqueue systems (BSD, macOS). + [goon]: https://github.com/arp242/goon [Vagrant]: https://www.vagrantup.com/ diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go index 28497f1dd8e..c349c326c71 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -1,8 +1,8 @@ //go:build solaris -// +build solaris -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh +// FEN backend for illumos (supported) and Solaris (untested, but should work). +// +// See port_create(3c) etc. for docs. https://www.illumos.org/man/3C/port_create package fsnotify @@ -12,150 +12,33 @@ import ( "os" "path/filepath" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). 
-// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. 
A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type fen struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error mu sync.Mutex port *unix.EventPort - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - dirs map[string]struct{} // Explicitly watched directories - watches map[string]struct{} // Explicitly watched non-directories + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + dirs map[string]Op // Explicitly watched directories + watches map[string]Op // Explicitly watched non-directories } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { - w := &Watcher{ - Events: make(chan Event, sz), - Errors: make(chan error), - dirs: make(map[string]struct{}), - watches: make(map[string]struct{}), +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + w := &fen{ + Events: ev, + Errors: errs, + dirs: make(map[string]Op), + watches: make(map[string]Op), done: make(chan struct{}), } @@ -171,27 +54,30 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { // sendEvent attempts to send an event to the user, returning true if the event // was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendEvent(name string, op Op) (sent bool) { +func (w *fen) sendEvent(name string, op Op) (sent bool) { select { - case w.Events <- Event{Name: name, Op: op}: - return true case <-w.done: return false + case w.Events <- Event{Name: name, Op: op}: + return true } } // sendError attempts to send an error to the user, returning true if the error // was put in the channel successfully and false if the watcher has been closed. 
-func (w *Watcher) sendError(err error) (sent bool) { - select { - case w.Errors <- err: +func (w *fen) sendError(err error) (sent bool) { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *fen) isClosed() bool { select { case <-w.done: return true @@ -200,8 +86,7 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *fen) Close() error { // Take the lock used by associateFile to prevent lingering events from // being processed after the close w.mu.Lock() @@ -213,60 +98,21 @@ func (w *Watcher) Close() error { return w.port.Close() } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *fen) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *fen) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } - if w.port.PathIsWatched(name) { - return nil + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) } - _ = getOptions(opts...) + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } // Currently we resolve symlinks that were explicitly requested to be // watched. Otherwise we would use LStat here. 
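The sendEvent/sendError rewrites above follow one pattern in every backend: treat a nil error as trivially "sent", then race the payload against the done channel so a send can never block after Close(). A minimal, self-contained sketch of that shape (trySend and the channels are illustrative names, not fsnotify API):

    package main

    import "fmt"

    // trySend delivers v unless done is closed; it mirrors the shape of the
    // backends' sendEvent/sendError. When both channels are ready, select
    // picks a case at random, so a late send may still succeed mid-shutdown.
    func trySend[T any](done <-chan struct{}, ch chan<- T, v T) bool {
        select {
        case <-done:
            return false
        case ch <- v:
            return true
        }
    }

    func main() {
        done := make(chan struct{})
        events := make(chan string, 1)

        fmt.Println(trySend(done, events, "create /file")) // true: buffer has room
        close(done)
        fmt.Println(trySend(done, events, "write /file")) // false: buffer full, done closed
    }

The nil-error shortcut is what lets call sites collapse `if err != nil { if !w.sendError(err) { return } }` into a bare `if !w.sendError(err) { return }`, the simplification visible in readEvents and updateDirectory.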
@@ -283,7 +129,7 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.dirs[name] = struct{}{} + w.dirs[name] = with.op w.mu.Unlock() return nil } @@ -294,26 +140,22 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.watches[name] = struct{}{} + w.watches[name] = with.op w.mu.Unlock() return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *fen) Remove(name string) error { if w.isClosed() { return nil } if !w.port.PathIsWatched(name) { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } // The user has expressed an intent. Immediately remove this name from // whichever watch list it might be in. If it's not in there the delete @@ -346,7 +188,7 @@ func (w *Watcher) Remove(name string) error { } // readEvents contains the main loop that runs in a goroutine watching for events. -func (w *Watcher) readEvents() { +func (w *fen) readEvents() { // If this function returns, the watcher has been closed and we can close // these channels defer func() { @@ -382,17 +224,19 @@ func (w *Watcher) readEvents() { continue } + if debug { + internal.Debug(pevent.Path, pevent.Events) + } + err = w.handleEvent(&pevent) - if err != nil { - if !w.sendError(err) { - return - } + if !w.sendError(err) { + return } } } } -func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { +func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { files, err := os.ReadDir(path) if err != nil { return err @@ -418,7 +262,7 @@ func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, ha // bitmap matches more than one event type (e.g. the file was both modified and // had the attributes changed between when the association was created and the // when event was returned) -func (w *Watcher) handleEvent(event *unix.PortEvent) error { +func (w *fen) handleEvent(event *unix.PortEvent) error { var ( events = event.Events path = event.Path @@ -510,15 +354,9 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { } if events&unix.FILE_MODIFIED != 0 { - if fmode.IsDir() { - if watchedDir { - if err := w.updateDirectory(path); err != nil { - return err - } - } else { - if !w.sendEvent(path, Write) { - return nil - } + if fmode.IsDir() && watchedDir { + if err := w.updateDirectory(path); err != nil { + return err } } else { if !w.sendEvent(path, Write) { @@ -543,7 +381,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { return nil } -func (w *Watcher) updateDirectory(path string) error { +func (w *fen) updateDirectory(path string) error { // The directory was modified, so we must find unwatched entities and watch // them. If something was removed from the directory, nothing will happen, // as everything else should still be watched. 
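Both handleDirectory above and updateDirectory reduce to the same move: list a directory's immediate entries with os.ReadDir and hand each one to a callback, leaving any recursion to the caller. A self-contained sketch of that shape, under the assumption that a per-entry error should abort the walk (eachEntry is a hypothetical helper, not the vendored function):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // eachEntry applies handler to the immediate children of dir, without
    // descending into subdirectories, mirroring the non-recursive walk the
    // FEN backend does over watched directories.
    func eachEntry(dir string, handler func(path string, info os.FileInfo) error) error {
        entries, err := os.ReadDir(dir)
        if err != nil {
            return err
        }
        for _, e := range entries {
            info, err := e.Info()
            if err != nil {
                return err
            }
            if err := handler(filepath.Join(dir, e.Name()), info); err != nil {
                return err
            }
        }
        return nil
    }

    func main() {
        _ = eachEntry(os.TempDir(), func(path string, info os.FileInfo) error {
            fmt.Println(path, info.IsDir())
            return nil
        })
    }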
@@ -563,10 +401,8 @@ func (w *Watcher) updateDirectory(path string) error { return err } err = w.associateFile(path, finfo, false) - if err != nil { - if !w.sendError(err) { - return nil - } + if !w.sendError(err) { + return nil } if !w.sendEvent(path, Create) { return nil @@ -575,7 +411,7 @@ func (w *Watcher) updateDirectory(path string) error { return nil } -func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error { +func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error { if w.isClosed() { return ErrClosed } @@ -593,34 +429,34 @@ func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) erro // cleared up that discrepancy. The most likely cause is that the event // has fired but we haven't processed it yet. err := w.port.DissociatePath(path) - if err != nil && err != unix.ENOENT { + if err != nil && !errors.Is(err, unix.ENOENT) { return err } } - // FILE_NOFOLLOW means we watch symlinks themselves rather than their - // targets. - events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW - if follow { - // We *DO* follow symlinks for explicitly watched entries. - events = unix.FILE_MODIFIED | unix.FILE_ATTRIB + + var events int + if !follow { + // Watch symlinks themselves rather than their targets unless this entry + // is explicitly watched. + events |= unix.FILE_NOFOLLOW + } + if true { // TODO: implement withOps() + events |= unix.FILE_MODIFIED } - return w.port.AssociatePath(path, stat, - events, - stat.Mode()) + if true { + events |= unix.FILE_ATTRIB + } + return w.port.AssociatePath(path, stat, events, stat.Mode()) } -func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error { +func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error { if !w.port.PathIsWatched(path) { return nil } return w.port.DissociatePath(path) } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *fen) WatchList() []string { if w.isClosed() { return nil } @@ -638,3 +474,11 @@ func (w *Watcher) WatchList() []string { return entries } + +func (w *fen) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go index 921c1c1e401..36c311694cd 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -1,8 +1,4 @@ //go:build linux && !appengine -// +build linux,!appengine - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -10,127 +6,20 @@ import ( "errors" "fmt" "io" + "io/fs" "os" "path/filepath" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. 
For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. 
For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type inotify struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error // Store fd here as os.File.Read() will no longer return on close after @@ -139,8 +28,26 @@ type Watcher struct { inotifyFile *os.File watches *watches done chan struct{} // Channel for sending a "quit message" to the reader goroutine - closeMu sync.Mutex + doneMu sync.Mutex doneResp chan struct{} // Channel to respond to Close + + // Store rename cookies in an array, with the index wrapping to 0. Almost + // all of the time what we get is a MOVED_FROM to set the cookie and the + // next event inotify sends will be MOVED_TO to read it. However, this is + // not guaranteed – as described in inotify(7) – and we may get other events + // between the two MOVED_* events (including other MOVED_* ones). + // + // A second issue is that moving a file outside the watched directory will + // trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to + // read and delete it. So just storing it in a map would slowly leak memory. + // + // Doing it like this gives us a simple fast LRU-cache that won't allocate. + // Ten items should be more than enough for our purpose, and a loop over + // such a short array is faster than a map access anyway (not that it hugely + // matters since we're talking about hundreds of ns at the most, but still). + cookies [10]koekje + cookieIndex uint8 + cookiesMu sync.Mutex } type ( @@ -150,9 +57,14 @@ type ( path map[string]uint32 // pathname → wd } watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) - path string // Watch path. + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) + path string // Watch path. + recurse bool // Recursion with ./...? + } + koekje struct { + cookie uint32 + path string } ) @@ -179,23 +91,45 @@ func (w *watches) add(ww *watch) { func (w *watches) remove(wd uint32) { w.mu.Lock() defer w.mu.Unlock() - delete(w.path, w.wd[wd].path) + watch := w.wd[wd] // Could have had Remove() called. See #616. 
+ if watch == nil { + return + } + delete(w.path, watch.path) delete(w.wd, wd) } -func (w *watches) removePath(path string) (uint32, bool) { +func (w *watches) removePath(path string) ([]uint32, error) { w.mu.Lock() defer w.mu.Unlock() + path, recurse := recursivePath(path) wd, ok := w.path[path] if !ok { - return 0, false + return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path) + } + + watch := w.wd[wd] + if recurse && !watch.recurse { + return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path) } delete(w.path, path) delete(w.wd, wd) + if !watch.recurse { + return []uint32{wd}, nil + } - return wd, true + wds := make([]uint32, 0, 8) + wds = append(wds, wd) + for p, rwd := range w.path { + if filepath.HasPrefix(p, path) { + delete(w.path, p) + delete(w.wd, rwd) + wds = append(wds, rwd) + } + } + return wds, nil } func (w *watches) byPath(path string) *watch { @@ -236,20 +170,11 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error return nil } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { // Need to set nonblocking mode for SetDeadline to work, otherwise blocking // I/O operations won't terminate on close. fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) @@ -257,12 +182,12 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { return nil, errno } - w := &Watcher{ + w := &inotify{ + Events: ev, + Errors: errs, fd: fd, inotifyFile: os.NewFile(uintptr(fd), ""), watches: newWatches(), - Events: make(chan Event, sz), - Errors: make(chan error), done: make(chan struct{}), doneResp: make(chan struct{}), } @@ -272,26 +197,29 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *inotify) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: +func (w *inotify) sendError(err error) bool { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *inotify) isClosed() bool { select { case <-w.done: return true @@ -300,15 +228,14 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. 
-func (w *Watcher) Close() error { - w.closeMu.Lock() +func (w *inotify) Close() error { + w.doneMu.Lock() if w.isClosed() { - w.closeMu.Unlock() + w.doneMu.Unlock() return nil } close(w.done) - w.closeMu.Unlock() + w.doneMu.Unlock() // Causes any blocking reads to return with an error, provided the file // still supports deadline operations. @@ -323,78 +250,104 @@ func (w *Watcher) Close() error { return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *inotify) Add(name string) error { return w.AddWith(name) } + +func (w *inotify) AddWith(path string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), path) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - name = filepath.Clean(name) - _ = getOptions(opts...) + path, recurse := recursivePath(path) + if recurse { + return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() { + if root == path { + return fmt.Errorf("fsnotify: not a directory: %q", path) + } + return nil + } - var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + // Send a Create event when adding new directory from a recursive + // watch; this is for "mkdir -p one/two/three". 
Usually all those + // directories will be created before we can set up watchers on the + // subdirectories, so only "one" would be sent as a Create event and + // not "one/two" and "one/two/three" (inotifywait -r has the same + // problem). + if with.sendCreate && root != path { + w.sendEvent(Event{Name: root, Op: Create}) + } + + return w.add(root, with, true) + }) + } - return w.watches.updatePath(name, func(existing *watch) (*watch, error) { + return w.add(path, with, false) +} + +func (w *inotify) add(path string, with withOpts, recurse bool) error { + var flags uint32 + if with.noFollow { + flags |= unix.IN_DONT_FOLLOW + } + if with.op.Has(Create) { + flags |= unix.IN_CREATE + } + if with.op.Has(Write) { + flags |= unix.IN_MODIFY + } + if with.op.Has(Remove) { + flags |= unix.IN_DELETE | unix.IN_DELETE_SELF + } + if with.op.Has(Rename) { + flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF + } + if with.op.Has(Chmod) { + flags |= unix.IN_ATTRIB + } + if with.op.Has(xUnportableOpen) { + flags |= unix.IN_OPEN + } + if with.op.Has(xUnportableRead) { + flags |= unix.IN_ACCESS + } + if with.op.Has(xUnportableCloseWrite) { + flags |= unix.IN_CLOSE_WRITE + } + if with.op.Has(xUnportableCloseRead) { + flags |= unix.IN_CLOSE_NOWRITE + } + return w.register(path, flags, recurse) +} + +func (w *inotify) register(path string, flags uint32, recurse bool) error { + return w.watches.updatePath(path, func(existing *watch) (*watch, error) { if existing != nil { flags |= existing.flags | unix.IN_MASK_ADD } - wd, err := unix.InotifyAddWatch(w.fd, name, flags) + wd, err := unix.InotifyAddWatch(w.fd, path, flags) if wd == -1 { return nil, err } if existing == nil { return &watch{ - wd: uint32(wd), - path: name, - flags: flags, + wd: uint32(wd), + path: path, + flags: flags, + recurse: recurse, }, nil } @@ -404,49 +357,44 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { }) } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *inotify) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(filepath.Clean(name)) } -func (w *Watcher) remove(name string) error { - wd, ok := w.watches.removePath(name) - if !ok { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - success, errno := unix.InotifyRmWatch(w.fd, wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case; - // The only two possible errors are: - // - // - EBADF, which happens when w.fd is not a valid file descriptor - // of any kind. - // - EINVAL, which is when fd is not an inotify descriptor or wd - // is not a valid watch descriptor. Watch descriptors are - // invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they - // are watching is deleted. 
- return errno +func (w *inotify) remove(name string) error { + wds, err := w.watches.removePath(name) + if err != nil { + return err + } + + for _, wd := range wds { + _, err := unix.InotifyRmWatch(w.fd, wd) + if err != nil { + // TODO: Perhaps it's not helpful to return an error here in every + // case; the only two possible errors are: + // + // EBADF, which happens when w.fd is not a valid file descriptor of + // any kind. + // + // EINVAL, which is when fd is not an inotify descriptor or wd is + // not a valid watch descriptor. Watch descriptors are invalidated + // when they are removed explicitly or implicitly; explicitly by + // inotify_rm_watch, implicitly when the file they are watching is + // deleted. + return err + } } return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *inotify) WatchList() []string { if w.isClosed() { return nil } @@ -463,7 +411,7 @@ // readEvents reads from the inotify file descriptor, converts the // received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { +func (w *inotify) readEvents() { defer func() { close(w.doneResp) close(w.Errors) @@ -506,15 +454,17 @@ continue } - var offset uint32 // We don't know how many events we just read into the buffer // While the offset points to at least one whole event... + var offset uint32 for offset <= uint32(n-unix.SizeofInotifyEvent) { var ( // Point "raw" to the event in the buffer raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) mask = uint32(raw.Mask) nameLen = uint32(raw.Len) + // Move to the next event in the buffer + next = func() { offset += unix.SizeofInotifyEvent + nameLen } ) if mask&unix.IN_Q_OVERFLOW != 0 { @@ -523,21 +473,53 @@ } } - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. + /// If the event happened to the watched directory or the watched + /// file, the kernel doesn't append the filename to the event, but + /// we would like to always fill the "Name" field with a valid + /// filename. We retrieve the path of the watch from the "paths" + /// map. watch := w.watches.byWd(uint32(raw.Wd)) + /// Can be nil if Remove() was called in another goroutine for this + /// path in between reading the events from the kernel and reading + /// the internal state. Not much we can do about it, so just skip. + /// See #616. + if watch == nil { + next() + continue + } + + name := watch.path + if nameLen > 0 { + /// Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + /// The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + if debug { + internal.Debug(name, raw.Mask, raw.Cookie) + } + + if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0 + next() + continue + } // inotify will automatically remove the watch on deletes; just need // to clean our state here.
- if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { w.watches.remove(watch.wd) } + // We can't really update the state when a watched path is moved; // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove // the watch. - if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if watch.recurse { + next() // Do nothing + continue + } + err := w.remove(watch.path) if err != nil && !errors.Is(err, ErrNonExistentWatch) { if !w.sendError(err) { @@ -546,34 +528,69 @@ func (w *Watcher) readEvents() { } } - var name string - if watch != nil { - name = watch.path - } - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + /// Skip if we're watching both this path and the parent; the parent + /// will already send a delete so no need to do it twice. + if mask&unix.IN_DELETE_SELF != 0 { + if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok { + next() + continue + } } - event := w.newEvent(name, mask) + ev := w.newEvent(name, mask, raw.Cookie) + // Need to update watch path for recurse. + if watch.recurse { + isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR + /// New directory created: set up watch on it. + if isDir && ev.Has(Create) { + err := w.register(ev.Name, watch.flags, true) + if !w.sendError(err) { + return + } - // Send the events that are not ignored on the events channel - if mask&unix.IN_IGNORED == 0 { - if !w.sendEvent(event) { - return + // This was a directory rename, so we need to update all + // the children. + // + // TODO: this is of course pretty slow; we should use a + // better data structure for storing all of this, e.g. store + // children in the watch. I have some code for this in my + // kqueue refactor we can use in the future. For now I'm + // okay with this as it's not publicly available. + // Correctness first, performance second. + if ev.renamedFrom != "" { + w.watches.mu.Lock() + for k, ww := range w.watches.wd { + if k == watch.wd || ww.path == ev.Name { + continue + } + if strings.HasPrefix(ww.path, ev.renamedFrom) { + ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) + w.watches.wd[k] = ww + } + } + w.watches.mu.Unlock() + } } } - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen + /// Send the events that are not ignored on the events channel + if !w.sendEvent(ev) { + return + } + next() } } } -// newEvent returns an platform-independent Event based on an inotify mask. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *inotify) isRecursive(path string) bool { + ww := w.watches.byPath(path) + if ww == nil { // path could be a file, so also check the Dir. 
+ ww = w.watches.byPath(filepath.Dir(path)) + } + return ww != nil && ww.recurse +} + +func (w *inotify) newEvent(name string, mask, cookie uint32) Event { e := Event{Name: name} if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { e.Op |= Create @@ -584,11 +601,58 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { if mask&unix.IN_MODIFY == unix.IN_MODIFY { e.Op |= Write } + if mask&unix.IN_OPEN == unix.IN_OPEN { + e.Op |= xUnportableOpen + } + if mask&unix.IN_ACCESS == unix.IN_ACCESS { + e.Op |= xUnportableRead + } + if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE { + e.Op |= xUnportableCloseWrite + } + if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE { + e.Op |= xUnportableCloseRead + } if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { e.Op |= Rename } if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { e.Op |= Chmod } + + if cookie != 0 { + if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + w.cookiesMu.Lock() + w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name} + w.cookieIndex++ + if w.cookieIndex > 9 { + w.cookieIndex = 0 + } + w.cookiesMu.Unlock() + } else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + w.cookiesMu.Lock() + var prev string + for _, c := range w.cookies { + if c.cookie == cookie { + prev = c.path + break + } + } + w.cookiesMu.Unlock() + e.renamedFrom = prev + } + } return e } + +func (w *inotify) xSupports(op Op) bool { + return true // Supports everything. +} + +func (w *inotify) state() { + w.watches.mu.Lock() + defer w.watches.mu.Unlock() + for wd, ww := range w.watches.wd { + fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go index 063a0915a07..d8de5ab76fd 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -1,8 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -11,174 +7,195 @@ import ( "fmt" "os" "path/filepath" + "runtime" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. 
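The deleted kqueue notes above still describe real behaviour: each watched file costs an open file descriptor, so large watch sets run into the process's open-file limit. A rough pre-flight check on unix-like platforms, assuming golang.org/x/sys/unix is available (the planned-watch count is illustrative):

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func main() {
        // Ask the kernel for the current and maximum open-file limits.
        var rl unix.Rlimit
        if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rl); err != nil {
            fmt.Println("getrlimit:", err)
            return
        }

        const plannedWatches = 10000 // Hypothetical number of files to watch.
        if plannedWatches > rl.Cur {
            fmt.Printf("watching %d files may exceed RLIMIT_NOFILE (cur=%d max=%d)\n",
                plannedWatches, rl.Cur, rl.Max)
        }
    }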
+type kqueue struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error - done chan struct{} - kq int // File descriptor (as returned by the kqueue() syscall). - closepipe [2]int // Pipe used for closing. - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Watched file descriptors (key: path). - watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). - userWatches map[string]struct{} // Watches added with Watcher.Add() - dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. - paths map[int]pathInfo // File descriptors to path names for processing kqueue events. - fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing kq. + watches *watches + done chan struct{} + doneMu sync.Mutex } -type pathInfo struct { - name string - isDir bool +type ( + watches struct { + mu sync.RWMutex + wd map[int]watch // wd → watch + path map[string]int // pathname → wd + byDir map[string]map[int]struct{} // dirname(path) → wd + seen map[string]struct{} // Keep track of if we know this file exists. + byUser map[string]struct{} // Watches added with Watcher.Add() + } + watch struct { + wd int + name string + linkName string // In case of links; name is the target, and this is the link. + isDir bool + dirFlags uint32 + } +) + +func newWatches() *watches { + return &watches{ + wd: make(map[int]watch), + path: make(map[string]int), + byDir: make(map[string]map[int]struct{}), + seen: make(map[string]struct{}), + byUser: make(map[string]struct{}), + } } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func (w *watches) listPaths(userOnly bool) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + if userOnly { + l := make([]string, 0, len(w.byUser)) + for p := range w.byUser { + l = append(l, p) + } + return l + } + + l := make([]string, 0, len(w.path)) + for p := range w.path { + l = append(l, p) + } + return l } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func (w *watches) watchesInDir(path string) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + l := make([]string, 0, 4) + for fd := range w.byDir[path] { + info := w.wd[fd] + if _, ok := w.byUser[info.name]; !ok { + l = append(l, info.name) + } + } + return l +} + +// Mark path as added by the user. 
+func (w *watches) addUserWatch(path string) { + w.mu.Lock() + defer w.mu.Unlock() + w.byUser[path] = struct{}{} +} + +func (w *watches) addLink(path string, fd int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.seen[path] = struct{}{} +} + +func (w *watches) add(path, linkPath string, fd int, isDir bool) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir} + + parent := filepath.Dir(path) + byDir, ok := w.byDir[parent] + if !ok { + byDir = make(map[int]struct{}, 1) + w.byDir[parent] = byDir + } + byDir[fd] = struct{}{} +} + +func (w *watches) byWd(fd int) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[fd] + return info, ok +} + +func (w *watches) byPath(path string) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[w.path[path]] + return info, ok +} + +func (w *watches) updateDirFlags(path string, flags uint32) { + w.mu.Lock() + defer w.mu.Unlock() + + fd := w.path[path] + info := w.wd[fd] + info.dirFlags = flags + w.wd[fd] = info +} + +func (w *watches) remove(fd int, path string) bool { + w.mu.Lock() + defer w.mu.Unlock() + + isDir := w.wd[fd].isDir + delete(w.path, path) + delete(w.byUser, path) + + parent := filepath.Dir(path) + delete(w.byDir[parent], fd) + + if len(w.byDir[parent]) == 0 { + delete(w.byDir, parent) + } + + delete(w.wd, fd) + delete(w.seen, path) + return isDir +} + +func (w *watches) markSeen(path string, exists bool) { + w.mu.Lock() + defer w.mu.Unlock() + if exists { + w.seen[path] = struct{}{} + } else { + delete(w.seen, path) + } +} + +func (w *watches) seenBefore(path string) bool { + w.mu.RLock() + defer w.mu.RUnlock() + _, ok := w.seen[path] + return ok +} + +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) +} + +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { kq, closepipe, err := newKqueue() if err != nil { return nil, err } - w := &Watcher{ - kq: kq, - closepipe: closepipe, - watches: make(map[string]int), - watchesByDir: make(map[string]map[int]struct{}), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]struct{}), - userWatches: make(map[string]struct{}), - Events: make(chan Event, sz), - Errors: make(chan error), - done: make(chan struct{}), + w := &kqueue{ + Events: ev, + Errors: errs, + kq: kq, + closepipe: closepipe, + done: make(chan struct{}), + watches: newWatches(), } go w.readEvents() @@ -203,6 +220,8 @@ func newKqueue() (kq int, closepipe [2]int, err error) { unix.Close(kq) return kq, closepipe, err } + unix.CloseOnExec(closepipe[0]) + unix.CloseOnExec(closepipe[1]) // Register changes to listen on the closepipe. changes := make([]unix.Kevent_t, 1) @@ -221,166 +240,108 @@ func newKqueue() (kq int, closepipe [2]int, err error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *kqueue) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. 
-func (w *Watcher) sendError(err error) bool { +func (w *kqueue) sendError(err error) bool { + if err == nil { + return true + } select { + case <-w.done: + return false case w.Errors <- err: return true + } +} + +func (w *kqueue) isClosed() bool { + select { case <-w.done: + return true + default: return false } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) Close() error { + w.doneMu.Lock() + if w.isClosed() { + w.doneMu.Unlock() return nil } - w.isClosed = true + close(w.done) + w.doneMu.Unlock() - // copy paths to remove while locked - pathsToRemove := make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() // Unlock before calling Remove, which also locks + pathsToRemove := w.watches.listPaths(false) for _, name := range pathsToRemove { w.Remove(name) } // Send "quit" message to the reader goroutine. unix.Close(w.closepipe[1]) - close(w.done) - return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *kqueue) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - _ = getOptions(opts...) +func (w *kqueue) AddWith(name string, opts ...addOpt) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + + with := getOptions(opts...) 
+ if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - w.mu.Lock() - w.userWatches[name] = struct{}{} - w.mu.Unlock() _, err := w.addWatch(name, noteAllEvents) - return err + if err != nil { + return err + } + w.watches.addUserWatch(name) + return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *kqueue) Remove(name string) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(name, true) } -func (w *Watcher) remove(name string, unwatchFiles bool) error { - name = filepath.Clean(name) - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) remove(name string, unwatchFiles bool) error { + if w.isClosed() { return nil } - watchfd, ok := w.watches[name] - w.mu.Unlock() + + name = filepath.Clean(name) + info, ok := w.watches.byPath(name) if !ok { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } - err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + err := w.register([]int{info.wd}, unix.EV_DELETE, 0) if err != nil { return err } - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.userWatches, name) - - parentName := filepath.Dir(name) - delete(w.watchesByDir[parentName], watchfd) - - if len(w.watchesByDir[parentName]) == 0 { - delete(w.watchesByDir, parentName) - } + unix.Close(info.wd) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - delete(w.fileExists, name) - w.mu.Unlock() + isDir := w.watches.remove(info.wd, name) // Find all watched paths that are in this directory that are not external. if unwatchFiles && isDir { - var pathsToRemove []string - w.mu.Lock() - for fd := range w.watchesByDir[name] { - path := w.paths[fd] - if _, ok := w.userWatches[path.name]; !ok { - pathsToRemove = append(pathsToRemove, path.name) - } - } - w.mu.Unlock() + pathsToRemove := w.watches.watchesInDir(name) for _, name := range pathsToRemove { // Since these are internal, not much sense in propagating error to // the user, as that will just confuse them with an error about a @@ -391,23 +352,11 @@ func (w *Watcher) remove(name string, unwatchFiles bool) error { return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed { +func (w *kqueue) WatchList() []string { + if w.isClosed() { return nil } - - entries := make([]string, 0, len(w.userWatches)) - for pathname := range w.userWatches { - entries = append(entries, pathname) - } - - return entries + return w.watches.listPaths(true) } // Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) @@ -417,34 +366,26 @@ const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | un // described in kevent(2). // // Returns the real path to the file which was added, with symlinks resolved. 
-func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) addWatch(name string, flags uint32) (string, error) { + if w.isClosed() { return "", ErrClosed } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() + name = filepath.Clean(name) + + info, alreadyWatching := w.watches.byPath(name) if !alreadyWatching { fi, err := os.Lstat(name) if err != nil { return "", err } - // Don't watch sockets or named pipes + // Don't watch sockets or named pipes. if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { return "", nil } - // Follow Symlinks. + // Follow symlinks. if fi.Mode()&os.ModeSymlink == os.ModeSymlink { link, err := os.Readlink(name) if err != nil { @@ -455,18 +396,15 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", nil } - w.mu.Lock() - _, alreadyWatching = w.watches[link] - w.mu.Unlock() - + _, alreadyWatching = w.watches.byPath(link) if alreadyWatching { // Add to watches so we don't get spurious Create events later // on when we diff the directories. - w.watches[name] = 0 - w.fileExists[name] = struct{}{} + w.watches.addLink(name, 0) return link, nil } + info.linkName = name name = link fi, err = os.Lstat(name) if err != nil { @@ -477,7 +415,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // Retry on EINTR; open() can return EINTR in practice on macOS. // See #354, and Go issues 11180 and 39237. for { - watchfd, err = unix.Open(name, openMode, 0) + info.wd, err = unix.Open(name, openMode, 0) if err == nil { break } @@ -488,40 +426,25 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", err } - isDir = fi.IsDir() + info.isDir = fi.IsDir() } - err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) if err != nil { - unix.Close(watchfd) + unix.Close(info.wd) return "", err } if !alreadyWatching { - w.mu.Lock() - parentName := filepath.Dir(name) - w.watches[name] = watchfd - - watchesByDir, ok := w.watchesByDir[parentName] - if !ok { - watchesByDir = make(map[int]struct{}, 1) - w.watchesByDir[parentName] = watchesByDir - } - watchesByDir[watchfd] = struct{}{} - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() + w.watches.add(name, info.linkName, info.wd, info.isDir) } - if isDir { - // Watch the directory if it has not been watched before, or if it was - // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - + // Watch the directory if it has not been watched before, or if it was + // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + if info.isDir { watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() + (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE) + w.watches.updateDirFlags(name, flags) if watchDir { if err := w.watchDirectoryFiles(name); err != nil { @@ -534,7 +457,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // readEvents reads from kqueue and converts the received 
kevents into // Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { +func (w *kqueue) readEvents() { defer func() { close(w.Events) close(w.Errors) @@ -543,50 +466,65 @@ func (w *Watcher) readEvents() { }() eventBuffer := make([]unix.Kevent_t, 10) - for closed := false; !closed; { + for { kevents, err := w.read(eventBuffer) // EINTR is okay, the syscall was interrupted before timeout expired. if err != nil && err != unix.EINTR { if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { - closed = true + return } - continue } - // Flush the events we received to the Events channel for _, kevent := range kevents { var ( - watchfd = int(kevent.Ident) - mask = uint32(kevent.Fflags) + wd = int(kevent.Ident) + mask = uint32(kevent.Fflags) ) // Shut down the loop when the pipe is closed, but only after all // other events have been processed. - if watchfd == w.closepipe[0] { - closed = true - continue + if wd == w.closepipe[0] { + return } - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() + path, ok := w.watches.byWd(wd) + if debug { + internal.Debug(path.name, &kevent) + } - event := w.newEvent(path.name, mask) + // On macOS it seems that sometimes an event with Ident=0 is + // delivered, and no other flags/information beyond that, even + // though we never saw such a file descriptor. For example in + // TestWatchSymlink/277 (usually at the end, but sometimes sooner): + // + // fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent) + // unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // + // The first is a normal event, the second with Ident 0. No error + // flag, no data, no ... nothing. + // + // I read a bit through bsd/kern_event.c from the xnu source, but I + // don't really see an obvious location where this is triggered – + // this doesn't seem intentional, but idk... + // + // Technically fd 0 is a valid descriptor, so only skip it if + // there's no path, and if we're on macOS. + if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" { + continue + } + + event := w.newEvent(path.name, path.linkName, mask) if event.Has(Rename) || event.Has(Remove) { w.remove(event.Name, false) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() + w.watches.markSeen(event.Name, false) } if path.isDir && event.Has(Write) && !event.Has(Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - if !w.sendEvent(event) { - closed = true - continue - } + w.dirChange(event.Name) + } else if !w.sendEvent(event) { + return } if event.Has(Remove) { @@ -594,25 +532,34 @@ func (w *Watcher) readEvents() { // mv f1 f2 will delete f2, then create f2. if path.isDir { fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() + _, found := w.watches.byPath(fileDir) if found { - err := w.sendDirectoryChangeEvents(fileDir) - if err != nil { - if !w.sendError(err) { - closed = true - } + // TODO: this branch is never triggered in any test. + // Added in d6220df (2012). + // isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111 + // + // I don't really get how this can be triggered either. + // And it wasn't triggered in the patch that added it, + // either. + // + // Original also had a comment: + // make sure the directory exists before we watch for + // changes. 
When we do a recursive watch and perform + // rm -rf, the parent directory might have gone + // missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the + // parent directory. + err := w.dirChange(fileDir) + if !w.sendError(err) { + return } } } else { - filePath := filepath.Clean(event.Name) - if fi, err := os.Lstat(filePath); err == nil { - err := w.sendFileCreatedEventIfNew(filePath, fi) - if err != nil { - if !w.sendError(err) { - closed = true - } + path := filepath.Clean(event.Name) + if fi, err := os.Lstat(path); err == nil { + err := w.sendCreateIfNew(path, fi) + if !w.sendError(err) { + return } } } @@ -622,8 +569,14 @@ func (w *Watcher) readEvents() { } // newEvent returns an platform-independent Event based on kqueue Fflags. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *kqueue) newEvent(name, linkName string, mask uint32) Event { e := Event{Name: name} + if linkName != "" { + // If the user watched "/path/link" then emit events as "/path/link" + // rather than "/path/target". + e.Name = linkName + } + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { e.Op |= Remove } @@ -645,8 +598,7 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { } // watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files +func (w *kqueue) watchDirectoryFiles(dirPath string) error { files, err := os.ReadDir(dirPath) if err != nil { return err @@ -674,9 +626,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { } } - w.mu.Lock() - w.fileExists[cleanPath] = struct{}{} - w.mu.Unlock() + w.watches.markSeen(cleanPath, true) } return nil @@ -686,7 +636,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { // // This functionality is to have the BSD watcher match the inotify, which sends // a create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dir string) error { +func (w *kqueue) dirChange(dir string) error { files, err := os.ReadDir(dir) if err != nil { // Directory no longer exists: we can ignore this safely. kqueue will @@ -694,61 +644,51 @@ func (w *Watcher) sendDirectoryChangeEvents(dir string) error { if errors.Is(err, os.ErrNotExist) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } for _, f := range files { fi, err := f.Info() if err != nil { - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } - err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi) if err != nil { // Don't need to send an error if this file isn't readable. if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } } return nil } -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. -func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - if !w.sendEvent(Event{Name: filePath, Op: Create}) { - return +// Send a create event if the file isn't already being tracked, and start +// watching this file. 
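+//
+// This is how the kqueue backend approximates inotify's Create events: kqueue
+// itself only reports a Write on the parent directory, so dirChange re-reads
+// the directory and any entry not yet marked as seen is sent as a Create and
+// then added to the watch set.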
+func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error { + if !w.watches.seenBefore(path) { + if !w.sendEvent(Event{Name: path, Op: Create}) { + return nil } } - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fi) + // Like watchDirectoryFiles, but without doing another ReadDir. + path, err := w.internalWatch(path, fi) if err != nil { return err } - - w.mu.Lock() - w.fileExists[filePath] = struct{}{} - w.mu.Unlock() - + w.watches.markSeen(path, true) return nil } -func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { +func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) { if fi.IsDir() { // mimic Linux providing delete events for subdirectories, but preserve // the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) + info, _ := w.watches.byPath(name) + return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME) } // watch file to mimic Linux inotify @@ -756,7 +696,7 @@ func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { } // Register events with the queue. -func (w *Watcher) register(fds []int, flags int, fflags uint32) error { +func (w *kqueue) register(fds []int, flags int, fflags uint32) error { changes := make([]unix.Kevent_t, len(fds)) for i, fd := range fds { // SetKevent converts int to the platform-specific types. @@ -773,10 +713,21 @@ func (w *Watcher) register(fds []int, flags int, fflags uint32) error { } // read retrieves pending events, or waits until an event occurs. -func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { +func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { n, err := unix.Kevent(w.kq, nil, events, nil) if err != nil { return nil, err } return events[0:n], nil } + +func (w *kqueue) xSupports(op Op) bool { + if runtime.GOOS == "freebsd" { + //return true // Supports everything. + } + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go index d34a23c015f..5eb5dbc66f2 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -1,205 +1,23 @@ //go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) -// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify import "errors" -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. 
-// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). 
- // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type other struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { return nil, errors.New("fsnotify not supported on the current platform") } - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() } - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { return nil } - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { return nil } - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return nil } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. 
-// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil } - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { return nil } +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + return newBackend(ev, errs) +} +func (w *other) Close() error { return nil } +func (w *other) WatchList() []string { return nil } +func (w *other) Add(name string) error { return nil } +func (w *other) AddWith(name string, opts ...addOpt) error { return nil } +func (w *other) Remove(name string) error { return nil } +func (w *other) xSupports(op Op) bool { return false } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go index 9bc91e5d613..c54a6308383 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -1,12 +1,8 @@ //go:build windows -// +build windows // Windows backend based on ReadDirectoryChangesW() // // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw -// -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -19,123 +15,15 @@ import ( "runtime" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/windows" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. 
-// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type readDirChangesW struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error port windows.Handle // Handle to completion port @@ -147,48 +35,40 @@ type Watcher struct { closed bool // Set to true when Close() is first called } -// NewWatcher creates a new Watcher. 
-func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(50) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(50, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) if err != nil { return nil, os.NewSyscallError("CreateIoCompletionPort", err) } - w := &Watcher{ + w := &readDirChangesW{ + Events: ev, + Errors: errs, port: port, watches: make(watchMap), input: make(chan *input, 1), - Events: make(chan Event, sz), - Errors: make(chan error), quit: make(chan chan<- error, 1), } go w.readEvents() return w, nil } -func (w *Watcher) isClosed() bool { +func (w *readDirChangesW) isClosed() bool { w.mu.Lock() defer w.mu.Unlock() return w.closed } -func (w *Watcher) sendEvent(name string, mask uint64) bool { +func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool { if mask == 0 { return false } event := w.newEvent(name, uint32(mask)) + event.renamedFrom = renamedFrom select { case ch := <-w.quit: w.quit <- ch @@ -198,17 +78,19 @@ func (w *Watcher) sendEvent(name string, mask uint64) bool { } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { +func (w *readDirChangesW) sendError(err error) bool { + if err == nil { + return true + } select { case w.Errors <- err: return true case <-w.quit: + return false } - return false } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *readDirChangesW) Close() error { if w.isClosed() { return nil } @@ -226,57 +108,21 @@ func (w *Watcher) Close() error { return <-ch } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. 
-// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } if with.bufsize < 4096 { return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") } @@ -295,18 +141,14 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { return <-in.reply } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *readDirChangesW) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } in := &input{ op: opRemoveWatch, @@ -320,11 +162,7 @@ func (w *Watcher) Remove(name string) error { return <-in.reply } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) WatchList() []string { +func (w *readDirChangesW) WatchList() []string { if w.isClosed() { return nil } @@ -335,7 +173,13 @@ func (w *Watcher) WatchList() []string { entries := make([]string, 0, len(w.watches)) for _, entry := range w.watches { for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) + for name := range watchEntry.names { + entries = append(entries, filepath.Join(watchEntry.path, name)) + } + // the directory itself is being watched + if watchEntry.mask != 0 { + entries = append(entries, watchEntry.path) + } } } @@ -361,7 +205,7 @@ const ( sysFSIGNORED = 0x8000 ) -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *readDirChangesW) newEvent(name string, mask uint32) Event { e := Event{Name: name} if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { e.Op |= Create @@ -417,7 +261,7 @@ type ( watchMap map[uint32]indexMap ) -func (w *Watcher) wakeupReader() error { +func (w *readDirChangesW) wakeupReader() error { err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) if err != nil { return os.NewSyscallError("PostQueuedCompletionStatus", err) @@ -425,7 +269,7 @@ func (w *Watcher) wakeupReader() error { return nil } -func (w *Watcher) getDir(pathname string) (dir string, err error) { +func (w *readDirChangesW) getDir(pathname string) (dir string, err error) { attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) if err != nil { return "", os.NewSyscallError("GetFileAttributes", err) @@ -439,7 +283,7 @@ func (w *Watcher) getDir(pathname string) (dir string, err error) { return } -func (w *Watcher) getIno(path string) (ino *inode, err error) { +func (w *readDirChangesW) getIno(path string) (ino *inode, err error) { h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), windows.FILE_LIST_DIRECTORY, windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, @@ -482,9 +326,8 @@ func (m watchMap) set(ino *inode, watch *watch) { } // Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { - //pathname, recurse := recursivePath(pathname) - recurse := false +func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error { + pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) if err != nil { @@ -538,7 +381,7 @@ func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { } // Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { +func (w *readDirChangesW) remWatch(pathname string) error { pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) @@ -566,11 +409,11 @@ func (w *Watcher) remWatch(pathname string) error { return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) } if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) watch.mask = 0 } else { name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } @@ -578,23 +421,23 @@ func (w *Watcher) remWatch(pathname string) error { } // Must run within the I/O thread. 
-func (w *Watcher) deleteWatch(watch *watch) { +func (w *readDirChangesW) deleteWatch(watch *watch) { for name, mask := range watch.names { if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED) } delete(watch.names, name) } if watch.mask != 0 { if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) } watch.mask = 0 } } // Must run within the I/O thread. -func (w *Watcher) startRead(watch *watch) error { +func (w *readDirChangesW) startRead(watch *watch) error { err := windows.CancelIo(watch.ino.handle) if err != nil { w.sendError(os.NewSyscallError("CancelIo", err)) @@ -624,7 +467,7 @@ func (w *Watcher) startRead(watch *watch) error { err := os.NewSyscallError("ReadDirectoryChanges", rdErr) if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) err = nil } w.deleteWatch(watch) @@ -637,7 +480,7 @@ func (w *Watcher) startRead(watch *watch) error { // readEvents reads from the I/O completion port, converts the // received events into Event objects and sends them via the Events channel. // Entry point to the I/O thread. -func (w *Watcher) readEvents() { +func (w *readDirChangesW) readEvents() { var ( n uint32 key uintptr @@ -700,7 +543,7 @@ func (w *Watcher) readEvents() { } case windows.ERROR_ACCESS_DENIED: // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) w.deleteWatch(watch) w.startRead(watch) continue @@ -733,6 +576,10 @@ func (w *Watcher) readEvents() { name := windows.UTF16ToString(buf) fullname := filepath.Join(watch.path, name) + if debug { + internal.Debug(fullname, raw.Action) + } + var mask uint64 switch raw.Action { case windows.FILE_ACTION_REMOVED: @@ -761,21 +608,22 @@ func (w *Watcher) readEvents() { } } - sendNameEvent := func() { - w.sendEvent(fullname, watch.names[name]&mask) - } if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() + w.sendEvent(fullname, "", watch.names[name]&mask) } if raw.Action == windows.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } - w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) + if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action)) + } else { + w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action)) + } + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() + w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask) } // Move to the next event in the buffer @@ -787,8 +635,7 @@ func (w *Watcher) readEvents() { // Error! 
if offset >= n { //lint:ignore ST1005 Windows should be capitalized - w.sendError(errors.New( - "Windows system assumed buffer larger than it is, events have likely been missed")) + w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed")) break } } @@ -799,7 +646,7 @@ func (w *Watcher) readEvents() { } } -func (w *Watcher) toWindowsFlags(mask uint64) uint32 { +func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 { var m uint32 if mask&sysFSMODIFY != 0 { m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE @@ -810,7 +657,7 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 { return m } -func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { +func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 { switch action { case windows.FILE_ACTION_ADDED: return sysFSCREATE @@ -825,3 +672,11 @@ func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { } return 0 } + +func (w *readDirChangesW) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 24c99cc4999..0760efe9160 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -3,19 +3,146 @@ // // Currently supported systems: // -// Linux 2.6.32+ via inotify -// BSD, macOS via kqueue -// Windows via ReadDirectoryChangesW -// illumos via FEN +// - Linux via inotify +// - BSD, macOS via kqueue +// - Windows via ReadDirectoryChangesW +// - illumos via FEN +// +// # FSNOTIFY_DEBUG +// +// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to +// stderr. This can be useful to track down some problems, especially in cases +// where fsnotify is used as an indirect dependency. +// +// Every event will be printed as soon as there's something useful to print, +// with as little processing from fsnotify. +// +// Example output: +// +// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1" package fsnotify import ( "errors" "fmt" + "os" "path/filepath" "strings" ) +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". 
+// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # Windows notes +// +// Paths can be added as "C:\\path\\to\\dir", but forward slashes +// ("C:/path/to/dir") will also work. +// +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all files, sometimes it will send no +// events, and often only for some files. +// +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. +type Watcher struct { + b backend + + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // when a file is truncated. On Windows it's never + // sent. 
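+	//
+	// This channel is created by NewWatcher or NewBufferedWatcher and handed
+	// to the platform backend, so the backend and this wrapper send on the
+	// same channel.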
+ Events chan Event + + // Errors sends any errors. + Errors chan error +} + // Event represents a file system notification. type Event struct { // Path to the file or directory. @@ -30,6 +157,16 @@ type Event struct { // This is a bitmask and some systems may send multiple operations at once. // Use the Event.Has() method instead of comparing with ==. Op Op + + // Create events will have this set to the old path if it's a rename. This + // only works when both the source and destination are watched. It's not + // reliable when watching individual files, only directories. + // + // For example "mv /tmp/file /tmp/rename" will emit: + // + // Event{Op: Rename, Name: "/tmp/file"} + // Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"} + renamedFrom string } // Op describes a set of file operations. @@ -50,7 +187,7 @@ const ( // example "remove to trash" is often a rename). Remove - // The path was renamed to something else; any watched on it will be + // The path was renamed to something else; any watches on it will be // removed. Rename @@ -60,15 +197,155 @@ const ( // get triggered very frequently by some software. For example, Spotlight // indexing on macOS, anti-virus software, backup software, etc. Chmod + + // File descriptor was opened. + // + // Only works on Linux and FreeBSD. + xUnportableOpen + + // File was read from. + // + // Only works on Linux and FreeBSD. + xUnportableRead + + // File opened for writing was closed. + // + // Only works on Linux and FreeBSD. + // + // The advantage of using this over Write is that it's more reliable than + // waiting for Write events to stop. It's also faster (if you're not + // listening to Write events): copying a file of a few GB can easily + // generate tens of thousands of Write events in a short span of time. + xUnportableCloseWrite + + // File opened for reading was closed. + // + // Only works on Linux and FreeBSD. + xUnportableCloseRead ) -// Common errors that can be reported. var ( + // ErrNonExistentWatch is used when Remove() is called on a path that's not + // added. ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch") - ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") - ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrClosed is used when trying to operate on a closed Watcher. + ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrEventOverflow is reported from the Errors channel when there are too + // many events: + // + // - inotify: inotify returns IN_Q_OVERFLOW – because there are too + // many queued events (the fs.inotify.max_queued_events + // sysctl can be used to increase this). + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. + ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") + + // ErrUnsupported is returned by AddWith() when WithOps() specified an + // Unportable event that's not supported on this platform. + xErrUnsupported = errors.New("fsnotify: not supported with this backend") ) +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBackend(ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. 
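+//
+// A minimal usage sketch; the buffer size of 500 here is arbitrary, not a
+// recommendation:
+//
+//	w, err := fsnotify.NewBufferedWatcher(500)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer w.Close()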
+// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBufferedBackend(sz, ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. +// +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(path string) error { return w.b.Add(path) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) } + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) Remove(path string) error { return w.b.Remove(path) } + +// Close removes all watches and closes the Events channel. +func (w *Watcher) Close() error { return w.b.Close() } + +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) WatchList() []string { return w.b.WatchList() } + +// Supports reports if all the listed operations are supported by this platform. +// +// Create, Write, Remove, Rename, and Chmod are always supported. 
It can only +// return false for an Op starting with Unportable. +func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) } + func (o Op) String() string { var b strings.Builder if o.Has(Create) { @@ -80,6 +357,18 @@ func (o Op) String() string { if o.Has(Write) { b.WriteString("|WRITE") } + if o.Has(xUnportableOpen) { + b.WriteString("|OPEN") + } + if o.Has(xUnportableRead) { + b.WriteString("|READ") + } + if o.Has(xUnportableCloseWrite) { + b.WriteString("|CLOSE_WRITE") + } + if o.Has(xUnportableCloseRead) { + b.WriteString("|CLOSE_READ") + } if o.Has(Rename) { b.WriteString("|RENAME") } @@ -100,24 +389,48 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) } // String returns a string representation of the event with their path. func (e Event) String() string { + if e.renamedFrom != "" { + return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom) + } return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } type ( + backend interface { + Add(string) error + AddWith(string, ...addOpt) error + Remove(string) error + WatchList() []string + Close() error + xSupports(Op) bool + } addOpt func(opt *withOpts) withOpts struct { - bufsize int + bufsize int + op Op + noFollow bool + sendCreate bool } ) +var debug = func() bool { + // Check for exactly "1" (rather than mere existence) so we can add + // options/flags in the future. I don't know if we ever want that, but it's + // nice to leave the option open. + return os.Getenv("FSNOTIFY_DEBUG") == "1" +}() + var defaultOpts = withOpts{ bufsize: 65536, // 64K + op: Create | Write | Remove | Rename | Chmod, } func getOptions(opts ...addOpt) withOpts { with := defaultOpts for _, o := range opts { - o(&with) + if o != nil { + o(&with) + } } return with } @@ -136,9 +449,44 @@ func WithBufferSize(bytes int) addOpt { return func(opt *withOpts) { opt.bufsize = bytes } } +// WithOps sets which operations to listen for. The default is [Create], +// [Write], [Remove], [Rename], and [Chmod]. +// +// Excluding operations you're not interested in can save quite a bit of CPU +// time; in some use cases there may be hundreds of thousands of useless Write +// or Chmod operations per second. +// +// This can also be used to add unportable operations not supported by all +// platforms; unportable operations all start with "Unportable": +// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and +// [UnportableCloseRead]. +// +// AddWith returns an error when using an unportable operation that's not +// supported. Use [Watcher.Support] to check for support. +func withOps(op Op) addOpt { + return func(opt *withOpts) { opt.op = op } +} + +// WithNoFollow disables following symlinks, so the symlinks themselves are +// watched. +func withNoFollow() addOpt { + return func(opt *withOpts) { opt.noFollow = true } +} + +// "Internal" option for recursive watches on inotify. +func withCreate() addOpt { + return func(opt *withOpts) { opt.sendCreate = true } +} + +var enableRecurse = false + // Check if this path is recursive (ends with "/..." or "\..."), and return the // path with the /... stripped. func recursivePath(path string) (string, bool) { + path = filepath.Clean(path) + if !enableRecurse { // Only enabled in tests for now. + return path, false + } if filepath.Base(path) == "..." 
{ return filepath.Dir(path), true } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go new file mode 100644 index 00000000000..b0eab10090d --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go @@ -0,0 +1,39 @@ +//go:build darwin + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ +func SetRlimit() { + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = l.Cur + + if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } + + if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go new file mode 100644 index 00000000000..928319fb09a --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go @@ -0,0 +1,57 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_BACKGROUND", unix.NOTE_BACKGROUND}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CRITICAL", unix.NOTE_CRITICAL}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS}, + {"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR}, + {"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL}, + {"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL}, + {"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK}, + {"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY}, + {"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_FUNLOCK", unix.NOTE_FUNLOCK}, + {"NOTE_LEEWAY", unix.NOTE_LEEWAY}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MACHTIME", unix.NOTE_MACHTIME}, + {"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME}, + {"NOTE_NONE", unix.NOTE_NONE}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OOB", unix.NOTE_OOB}, + //{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!) 
+ {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_REAP", unix.NOTE_REAP}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_SIGNAL", unix.NOTE_SIGNAL}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + {"NOTE_VM_ERROR", unix.NOTE_VM_ERROR}, + {"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE}, + {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE}, + {"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go new file mode 100644 index 00000000000..3186b0c3491 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go @@ -0,0 +1,33 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_OOB", unix.NOTE_OOB}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go new file mode 100644 index 00000000000..f69fdb930f5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go @@ -0,0 +1,42 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSTIME", unix.NOTE_ABSTIME}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CLOSE", unix.NOTE_CLOSE}, + {"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FILE_POLL", unix.NOTE_FILE_POLL}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MSECONDS", unix.NOTE_MSECONDS}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OPEN", unix.NOTE_OPEN}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_READ", unix.NOTE_READ}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + 
{"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go new file mode 100644 index 00000000000..607e683bd73 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go @@ -0,0 +1,32 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin + +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, kevent *unix.Kevent_t) { + mask := uint32(kevent.Fflags) + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go new file mode 100644 index 00000000000..35c734be431 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go @@ -0,0 +1,56 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask, cookie uint32) { + names := []struct { + n string + m uint32 + }{ + {"IN_ACCESS", unix.IN_ACCESS}, + {"IN_ATTRIB", unix.IN_ATTRIB}, + {"IN_CLOSE", unix.IN_CLOSE}, + {"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE}, + {"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE}, + {"IN_CREATE", unix.IN_CREATE}, + {"IN_DELETE", unix.IN_DELETE}, + {"IN_DELETE_SELF", unix.IN_DELETE_SELF}, + {"IN_IGNORED", unix.IN_IGNORED}, + {"IN_ISDIR", unix.IN_ISDIR}, + {"IN_MODIFY", unix.IN_MODIFY}, + {"IN_MOVE", unix.IN_MOVE}, + {"IN_MOVED_FROM", unix.IN_MOVED_FROM}, + {"IN_MOVED_TO", unix.IN_MOVED_TO}, + {"IN_MOVE_SELF", unix.IN_MOVE_SELF}, + {"IN_OPEN", unix.IN_OPEN}, + {"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW}, + {"IN_UNMOUNT", unix.IN_UNMOUNT}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + var c string + if cookie > 0 { + c = fmt.Sprintf("(cookie: %d) ", cookie) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go new file mode 100644 index 00000000000..e5b3b6f6943 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go @@ -0,0 +1,25 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go 
b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go new file mode 100644 index 00000000000..1dd455bc5a4 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go @@ -0,0 +1,28 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + // {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386? + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EOF", unix.NOTE_EOF}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRUNCATE", unix.NOTE_TRUNCATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go new file mode 100644 index 00000000000..f1b2e73bd5b --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go @@ -0,0 +1,45 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask int32) { + names := []struct { + n string + m int32 + }{ + {"FILE_ACCESS", unix.FILE_ACCESS}, + {"FILE_MODIFIED", unix.FILE_MODIFIED}, + {"FILE_ATTRIB", unix.FILE_ATTRIB}, + {"FILE_TRUNC", unix.FILE_TRUNC}, + {"FILE_NOFOLLOW", unix.FILE_NOFOLLOW}, + {"FILE_DELETE", unix.FILE_DELETE}, + {"FILE_RENAME_TO", unix.FILE_RENAME_TO}, + {"FILE_RENAME_FROM", unix.FILE_RENAME_FROM}, + {"UNMOUNTED", unix.UNMOUNTED}, + {"MOUNTEDOVER", unix.MOUNTEDOVER}, + {"FILE_EXCEPTION", unix.FILE_EXCEPTION}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go new file mode 100644 index 00000000000..52bf4ce53b5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go @@ -0,0 +1,40 @@ +package internal + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/sys/windows" +) + +func Debug(name string, mask uint32) { + names := []struct { + n string + m uint32 + }{ + {"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED}, + {"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED}, + {"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED}, + {"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME}, + {"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name)) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go 
b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go new file mode 100644 index 00000000000..547df1df84b --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go @@ -0,0 +1,31 @@ +//go:build freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/internal.go b/vendor/github.com/fsnotify/fsnotify/internal/internal.go new file mode 100644 index 00000000000..7daa45e19ee --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/internal.go @@ -0,0 +1,2 @@ +// Package internal contains some helpers. +package internal diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/vendor/github.com/fsnotify/fsnotify/internal/unix.go new file mode 100644 index 00000000000..30976ce9739 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix.go @@ -0,0 +1,31 @@ +//go:build !windows && !darwin && !freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix2.go b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go new file mode 100644 index 00000000000..37dfeddc289 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go @@ -0,0 +1,7 @@ +//go:build !windows + +package internal + +func HasPrivilegesForSymlink() bool { + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/windows.go b/vendor/github.com/fsnotify/fsnotify/internal/windows.go new file mode 100644 index 00000000000..a72c6495490 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/windows.go @@ -0,0 +1,41 @@ +//go:build windows + +package internal + +import ( + "errors" + + "golang.org/x/sys/windows" +) + +// Just a dummy. 
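The SetRlimit helpers above exist because kqueue consumes one file descriptor per watched path, so fsnotify raises the soft RLIMIT_NOFILE limit to its hard ceiling at startup (Go 1.19+ does this automatically, per the CL linked in the comments). A standalone sketch of the same pattern, assuming Linux, where the syscall.Rlimit fields are uint64:

package main

import (
	"fmt"
	"syscall"
)

func main() {
	var l syscall.Rlimit
	err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
	if err == nil && l.Cur != l.Max {
		l.Cur = l.Max // raise the soft limit to the hard maximum
		_ = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
	}
	fmt.Println("open-file soft limit now:", l.Cur)
}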
+var ( + SyscallEACCES = errors.New("dummy") + UnixEACCES = errors.New("dummy") +) + +func SetRlimit() {} +func Maxfiles() uint64 { return 1<<64 - 1 } +func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") } +func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") } + +func HasPrivilegesForSymlink() bool { + var sid *windows.SID + err := windows.AllocateAndInitializeSid( + &windows.SECURITY_NT_AUTHORITY, + 2, + windows.SECURITY_BUILTIN_DOMAIN_RID, + windows.DOMAIN_ALIAS_RID_ADMINS, + 0, 0, 0, 0, 0, 0, + &sid) + if err != nil { + return false + } + defer windows.FreeSid(sid) + token := windows.Token(0) + member, err := token.IsMember(sid) + if err != nil { + return false + } + return member || token.IsElevated() +} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh deleted file mode 100644 index 99012ae6539..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/env zsh -[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 -setopt err_exit no_unset pipefail extended_glob - -# Simple script to update the godoc comments on all watchers so you don't need -# to update the same comment 5 times. - -watcher=$(</tmp/x - print -r -- $cmt >>/tmp/x - tail -n+$(( end + 1 )) $file >>/tmp/x - mv /tmp/x $file - done -} - -set-cmt '^type Watcher struct ' $watcher -set-cmt '^func NewWatcher(' $new -set-cmt '^func NewBufferedWatcher(' $newbuffered -set-cmt '^func (w \*Watcher) Add(' $add -set-cmt '^func (w \*Watcher) AddWith(' $addwith -set-cmt '^func (w \*Watcher) Remove(' $remove -set-cmt '^func (w \*Watcher) Close(' $close -set-cmt '^func (w \*Watcher) WatchList(' $watchlist -set-cmt '^[[:space:]]*Events *chan Event$' $events -set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go index 4322b0b8855..f65e8fe3edc 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -1,5 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly -// +build freebsd openbsd netbsd dragonfly package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go index 5da5ffa78fe..a29fc7aab62 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -1,5 +1,4 @@ //go:build darwin -// +build darwin package fsnotify diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go index 7064cd96aed..738c9754a61 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.35.1 +// protoc-gen-go v1.36.0 // protoc (unknown) // source: protoc-gen-openapiv2/options/annotations.proto +//go:build !protoopaque + package options import ( @@ -45,6 +47,14 @@ var file_protoc_gen_openapiv2_options_annotations_proto_extTypes = []protoimpl.E Tag: "bytes,1042,opt,name=openapiv2_schema", Filename: "protoc-gen-openapiv2/options/annotations.proto", }, + { + ExtendedType: (*descriptorpb.EnumOptions)(nil), + ExtensionType: (*EnumSchema)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum", + Tag: "bytes,1042,opt,name=openapiv2_enum", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, { ExtendedType: (*descriptorpb.ServiceOptions)(nil), ExtensionType: (*Tag)(nil), @@ -96,6 +106,17 @@ var ( E_Openapiv2Schema = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[2] ) +// Extension fields to descriptorpb.EnumOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.EnumSchema openapiv2_enum = 1042; + E_Openapiv2Enum = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[3] +) + // Extension fields to descriptorpb.ServiceOptions. var ( // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. @@ -104,7 +125,7 @@ var ( // different descriptor messages. // // optional grpc.gateway.protoc_gen_openapiv2.options.Tag openapiv2_tag = 1042; - E_Openapiv2Tag = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[3] + E_Openapiv2Tag = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[4] ) // Extension fields to descriptorpb.FieldOptions. @@ -115,7 +136,7 @@ var ( // different descriptor messages. 
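The new openapiv2_enum extension registered above is read through the standard protobuf-go extension API. A brief sketch; EnumSchema and E_Openapiv2Enum are the names added in this diff, and the rest is stock google.golang.org/protobuf:

package main

import (
	"fmt"

	options "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

// enumSchema returns the openapiv2_enum extension from an enum's options,
// or nil when the annotation is absent.
func enumSchema(opts *descriptorpb.EnumOptions) *options.EnumSchema {
	if !proto.HasExtension(opts, options.E_Openapiv2Enum) {
		return nil
	}
	return proto.GetExtension(opts, options.E_Openapiv2Enum).(*options.EnumSchema)
}

func main() {
	fmt.Println(enumSchema(&descriptorpb.EnumOptions{})) // <nil>: nothing set
}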
// // optional grpc.gateway.protoc_gen_openapiv2.options.JSONSchema openapiv2_field = 1042; - E_Openapiv2Field = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[4] + E_Openapiv2Field = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[5] ) var File_protoc_gen_openapiv2_options_annotations_proto protoreflect.FileDescriptor @@ -156,56 +177,68 @@ var file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = []byte{ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x75, 0x0a, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, - 0x32, 0x5f, 0x74, 0x61, 0x67, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, - 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x0c, 0x6f, - 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x54, 0x61, 0x67, 0x3a, 0x7e, 0x0a, 0x0f, 0x6f, - 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, - 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, - 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0e, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x48, 0x5a, 0x46, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x65, - 0x63, 0x6f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x67, 0x61, - 0x74, 0x65, 0x77, 0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, - 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x7b, 0x0a, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x45, 0x6e, 
0x75, + 0x6d, 0x3a, 0x75, 0x0a, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x74, + 0x61, 0x67, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x0c, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x54, 0x61, 0x67, 0x3a, 0x7e, 0x0a, 0x0f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, + 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73, + 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, + 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var file_protoc_gen_openapiv2_options_annotations_proto_goTypes = []any{ (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions (*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions (*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions - (*descriptorpb.ServiceOptions)(nil), // 3: google.protobuf.ServiceOptions - (*descriptorpb.FieldOptions)(nil), // 4: google.protobuf.FieldOptions - (*Swagger)(nil), // 5: grpc.gateway.protoc_gen_openapiv2.options.Swagger - (*Operation)(nil), // 6: grpc.gateway.protoc_gen_openapiv2.options.Operation - (*Schema)(nil), // 7: grpc.gateway.protoc_gen_openapiv2.options.Schema - (*Tag)(nil), // 8: grpc.gateway.protoc_gen_openapiv2.options.Tag - (*JSONSchema)(nil), // 9: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema + (*descriptorpb.EnumOptions)(nil), // 3: google.protobuf.EnumOptions + (*descriptorpb.ServiceOptions)(nil), // 4: google.protobuf.ServiceOptions + (*descriptorpb.FieldOptions)(nil), // 5: google.protobuf.FieldOptions + (*Swagger)(nil), // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger + (*Operation)(nil), // 7: grpc.gateway.protoc_gen_openapiv2.options.Operation + (*Schema)(nil), // 8: grpc.gateway.protoc_gen_openapiv2.options.Schema + (*EnumSchema)(nil), // 9: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema + (*Tag)(nil), // 10: grpc.gateway.protoc_gen_openapiv2.options.Tag + (*JSONSchema)(nil), // 11: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema } var 
file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = []int32{ 0, // 0: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:extendee -> google.protobuf.FileOptions 1, // 1: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:extendee -> google.protobuf.MethodOptions 2, // 2: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:extendee -> google.protobuf.MessageOptions - 3, // 3: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:extendee -> google.protobuf.ServiceOptions - 4, // 4: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:extendee -> google.protobuf.FieldOptions - 5, // 5: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger - 6, // 6: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation - 7, // 7: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema - 8, // 8: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag - 9, // 9: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema - 10, // [10:10] is the sub-list for method output_type - 10, // [10:10] is the sub-list for method input_type - 5, // [5:10] is the sub-list for extension type_name - 0, // [0:5] is the sub-list for extension extendee + 3, // 3: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:extendee -> google.protobuf.EnumOptions + 4, // 4: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:extendee -> google.protobuf.ServiceOptions + 5, // 5: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:extendee -> google.protobuf.FieldOptions + 6, // 6: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger + 7, // 7: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation + 8, // 8: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema + 9, // 9: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema + 10, // 10: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag + 11, // 11: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema + 12, // [12:12] is the sub-list for method output_type + 12, // [12:12] is the sub-list for method input_type + 6, // [6:12] is the sub-list for extension type_name + 0, // [0:6] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name } @@ -222,7 +255,7 @@ func file_protoc_gen_openapiv2_options_annotations_proto_init() { RawDescriptor: file_protoc_gen_openapiv2_options_annotations_proto_rawDesc, NumEnums: 0, NumMessages: 0, - NumExtensions: 5, + NumExtensions: 6, NumServices: 0, }, GoTypes: file_protoc_gen_openapiv2_options_annotations_proto_goTypes, diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.proto index d63d3c87eb3..aecc5e709c3 100644 --- 
a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.proto +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.proto @@ -28,6 +28,13 @@ extend google.protobuf.MessageOptions { // different descriptor messages. Schema openapiv2_schema = 1042; } +extend google.protobuf.EnumOptions { + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + EnumSchema openapiv2_enum = 1042; +} extend google.protobuf.ServiceOptions { // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. // diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.swagger.json b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.swagger.json deleted file mode 100644 index 8efd122ca62..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.swagger.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "protoc-gen-openapiv2/options/annotations.proto", - "version": "version not set" - }, - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": {}, - "definitions": { - "protobufAny": { - "type": "object", - "properties": { - "@type": { - "type": "string" - } - }, - "additionalProperties": {} - }, - "rpcStatus": { - "type": "object", - "properties": { - "code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/protobufAny" - } - } - } - } - } -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations_protoopaque.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations_protoopaque.pb.go new file mode 100644 index 00000000000..b570167836d --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations_protoopaque.pb.go @@ -0,0 +1,269 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.0 +// protoc (unknown) +// source: protoc-gen-openapiv2/options/annotations.proto + +//go:build protoopaque + +package options + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
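The paired protoimpl.EnforceVersion lines in these generated files are a compile-time range check: EnforceVersion is an unsigned integer type, so each constant expression compiles only while it is non-negative, that is while MinVersion <= 20 <= MaxVersion. A generic sketch of the same trick, with made-up version numbers:

package main

type enforce uint // converting a negative constant to this type is a compile error

const (
	minSupported = 18 // hypothetical oldest supported runtime
	maxSupported = 23 // hypothetical newest supported runtime
	generatedAt  = 20 // version this file was generated against
)

// Both lines fail to compile if generatedAt drifts outside
// [minSupported, maxSupported].
const (
	_ = enforce(generatedAt - minSupported)
	_ = enforce(maxSupported - generatedAt)
)

func main() {}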
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var file_protoc_gen_openapiv2_options_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*Swagger)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger", + Tag: "bytes,1042,opt,name=openapiv2_swagger", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: (*Operation)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation", + Tag: "bytes,1042,opt,name=openapiv2_operation", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*Schema)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema", + Tag: "bytes,1042,opt,name=openapiv2_schema", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.EnumOptions)(nil), + ExtensionType: (*EnumSchema)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum", + Tag: "bytes,1042,opt,name=openapiv2_enum", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.ServiceOptions)(nil), + ExtensionType: (*Tag)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag", + Tag: "bytes,1042,opt,name=openapiv2_tag", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*JSONSchema)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field", + Tag: "bytes,1042,opt,name=openapiv2_field", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, +} + +// Extension fields to descriptorpb.FileOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.Swagger openapiv2_swagger = 1042; + E_Openapiv2Swagger = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[0] +) + +// Extension fields to descriptorpb.MethodOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.Operation openapiv2_operation = 1042; + E_Openapiv2Operation = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[1] +) + +// Extension fields to descriptorpb.MessageOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.Schema openapiv2_schema = 1042; + E_Openapiv2Schema = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[2] +) + +// Extension fields to descriptorpb.EnumOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. 
It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.EnumSchema openapiv2_enum = 1042; + E_Openapiv2Enum = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[3] +) + +// Extension fields to descriptorpb.ServiceOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.Tag openapiv2_tag = 1042; + E_Openapiv2Tag = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[4] +) + +// Extension fields to descriptorpb.FieldOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.JSONSchema openapiv2_field = 1042; + E_Openapiv2Field = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[5] +) + +var File_protoc_gen_openapiv2_options_annotations_proto protoreflect.FileDescriptor + +var file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = []byte{ + 0x0a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x29, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x7e, 0x0a, 0x11, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, + 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, + 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x52, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x76, 0x32, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x3a, 0x86, 0x01, 0x0a, 0x13, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x12, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x7e, 0x0a, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x52, 0x0f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x7b, 0x0a, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x45, 0x6e, 0x75, + 0x6d, 0x3a, 0x75, 0x0a, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x74, + 0x61, 0x67, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x0c, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x54, 0x61, 0x67, 0x3a, 0x7e, 0x0a, 0x0f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, + 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x48, 
0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73, + 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, + 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_protoc_gen_openapiv2_options_annotations_proto_goTypes = []any{ + (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions + (*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions + (*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions + (*descriptorpb.EnumOptions)(nil), // 3: google.protobuf.EnumOptions + (*descriptorpb.ServiceOptions)(nil), // 4: google.protobuf.ServiceOptions + (*descriptorpb.FieldOptions)(nil), // 5: google.protobuf.FieldOptions + (*Swagger)(nil), // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger + (*Operation)(nil), // 7: grpc.gateway.protoc_gen_openapiv2.options.Operation + (*Schema)(nil), // 8: grpc.gateway.protoc_gen_openapiv2.options.Schema + (*EnumSchema)(nil), // 9: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema + (*Tag)(nil), // 10: grpc.gateway.protoc_gen_openapiv2.options.Tag + (*JSONSchema)(nil), // 11: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema +} +var file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = []int32{ + 0, // 0: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:extendee -> google.protobuf.FileOptions + 1, // 1: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:extendee -> google.protobuf.MethodOptions + 2, // 2: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:extendee -> google.protobuf.MessageOptions + 3, // 3: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:extendee -> google.protobuf.EnumOptions + 4, // 4: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:extendee -> google.protobuf.ServiceOptions + 5, // 5: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:extendee -> google.protobuf.FieldOptions + 6, // 6: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger + 7, // 7: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation + 8, // 8: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema + 9, // 9: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema + 10, // 10: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag + 11, // 11: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema + 12, // [12:12] is the sub-list for method output_type + 12, // [12:12] is the sub-list for method input_type + 6, // [6:12] is the sub-list for extension type_name + 0, // [0:6] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_protoc_gen_openapiv2_options_annotations_proto_init() } +func file_protoc_gen_openapiv2_options_annotations_proto_init() { + if File_protoc_gen_openapiv2_options_annotations_proto != nil { + return + } + 
file_protoc_gen_openapiv2_options_openapiv2_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_protoc_gen_openapiv2_options_annotations_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 6, + NumServices: 0, + }, + GoTypes: file_protoc_gen_openapiv2_options_annotations_proto_goTypes, + DependencyIndexes: file_protoc_gen_openapiv2_options_annotations_proto_depIdxs, + ExtensionInfos: file_protoc_gen_openapiv2_options_annotations_proto_extTypes, + }.Build() + File_protoc_gen_openapiv2_options_annotations_proto = out.File + file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = nil + file_protoc_gen_openapiv2_options_annotations_proto_goTypes = nil + file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/buf.gen.yaml b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/buf.gen.yaml new file mode 100644 index 00000000000..07dfb958f1e --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/buf.gen.yaml @@ -0,0 +1,7 @@ +version: v2 +plugins: + - remote: buf.build/protocolbuffers/go:v1.36.0 + out: . + opt: + - paths=source_relative + - default_api_level=API_HYBRID diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go index 97b3240bc1a..3a34e664e0a 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.1 +// protoc-gen-go v1.36.0 // protoc (unknown) // source: protoc-gen-openapiv2/options/openapiv2.proto +//go:build !protoopaque + package options import ( @@ -11,7 +13,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" structpb "google.golang.org/protobuf/types/known/structpb" reflect "reflect" - sync "sync" ) const ( @@ -73,11 +74,6 @@ func (x Scheme) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use Scheme.Descriptor instead. -func (Scheme) EnumDescriptor() ([]byte, []int) { - return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{0} -} - // `Type` is a supported HTTP header type. // See https://swagger.io/specification/v2/#parameterType. type HeaderParameter_Type int32 @@ -130,11 +126,6 @@ func (x HeaderParameter_Type) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use HeaderParameter_Type.Descriptor instead. -func (HeaderParameter_Type) EnumDescriptor() ([]byte, []int) { - return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{3, 0} -} - type JSONSchema_JSONSchemaSimpleTypes int32 const ( @@ -194,11 +185,6 @@ func (x JSONSchema_JSONSchemaSimpleTypes) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use JSONSchema_JSONSchemaSimpleTypes.Descriptor instead. -func (JSONSchema_JSONSchemaSimpleTypes) EnumDescriptor() ([]byte, []int) { - return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{11, 0} -} - // The type of the security scheme. Valid values are "basic", // "apiKey" or "oauth2". 
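The regeneration also drops the long-deprecated Descriptor() and EnumDescriptor() accessors, as the removals here show. Callers that still need descriptors can go through protoreflect, exactly as the removed deprecation notices suggest; a short sketch:

package main

import (
	"fmt"

	options "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
)

func main() {
	md := (&options.Swagger{}).ProtoReflect().Descriptor() // replaces (*Swagger).Descriptor()
	ed := options.Scheme(0).Descriptor()                   // replaces Scheme.EnumDescriptor()
	fmt.Println(md.FullName(), ed.FullName())
}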
type SecurityScheme_Type int32 @@ -248,11 +234,6 @@ func (x SecurityScheme_Type) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use SecurityScheme_Type.Descriptor instead. -func (SecurityScheme_Type) EnumDescriptor() ([]byte, []int) { - return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{14, 0} -} - // The location of the API key. Valid values are "query" or "header". type SecurityScheme_In int32 @@ -298,11 +279,6 @@ func (x SecurityScheme_In) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use SecurityScheme_In.Descriptor instead. -func (SecurityScheme_In) EnumDescriptor() ([]byte, []int) { - return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{14, 1} -} - // The flow used by the OAuth2 security scheme. Valid values are // "implicit", "password", "application" or "accessCode". type SecurityScheme_Flow int32 @@ -355,11 +331,6 @@ func (x SecurityScheme_Flow) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use SecurityScheme_Flow.Descriptor instead. -func (SecurityScheme_Flow) EnumDescriptor() ([]byte, []int) { - return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{14, 2} -} - // `Swagger` is a representation of OpenAPI v2 specification's Swagger object. // // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject @@ -386,10 +357,7 @@ func (SecurityScheme_Flow) EnumDescriptor() ([]byte, []int) { // produces: "application/json"; // }; type Swagger struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"hybrid.v1"` // Specifies the OpenAPI Specification version being used. It can be // used by the OpenAPI UI and other clients to interpret the API listing. The // value MUST be "2.0". @@ -426,7 +394,7 @@ type Swagger struct { Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"` // An object to hold responses that can be used across operations. This // property does not define global responses for all operations. - Responses map[string]*Response `protobuf:"bytes,10,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Responses map[string]*Response `protobuf:"bytes,10,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // Security scheme definitions that can be used across the specification. SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,11,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"` // A declaration of which security schemes are applied for the API as a whole. @@ -442,7 +410,9 @@ type Swagger struct { // Custom properties that start with "x-" such as "x-foo" used to describe // extra functionality that is not covered by the standard OpenAPI Specification. 
// See: https://swagger.io/docs/specification/2-0/swagger-extensions/ - Extensions map[string]*structpb.Value `protobuf:"bytes,15,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Extensions map[string]*structpb.Value `protobuf:"bytes,15,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Swagger) Reset() { @@ -470,11 +440,6 @@ func (x *Swagger) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Swagger.ProtoReflect.Descriptor instead. -func (*Swagger) Descriptor() ([]byte, []int) { - return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{0} -} - func (x *Swagger) GetSwagger() string { if x != nil { return x.Swagger @@ -566,6 +531,169 @@ func (x *Swagger) GetExtensions() map[string]*structpb.Value { return nil } +func (x *Swagger) SetSwagger(v string) { + x.Swagger = v +} + +func (x *Swagger) SetInfo(v *Info) { + x.Info = v +} + +func (x *Swagger) SetHost(v string) { + x.Host = v +} + +func (x *Swagger) SetBasePath(v string) { + x.BasePath = v +} + +func (x *Swagger) SetSchemes(v []Scheme) { + x.Schemes = v +} + +func (x *Swagger) SetConsumes(v []string) { + x.Consumes = v +} + +func (x *Swagger) SetProduces(v []string) { + x.Produces = v +} + +func (x *Swagger) SetResponses(v map[string]*Response) { + x.Responses = v +} + +func (x *Swagger) SetSecurityDefinitions(v *SecurityDefinitions) { + x.SecurityDefinitions = v +} + +func (x *Swagger) SetSecurity(v []*SecurityRequirement) { + x.Security = v +} + +func (x *Swagger) SetTags(v []*Tag) { + x.Tags = v +} + +func (x *Swagger) SetExternalDocs(v *ExternalDocumentation) { + x.ExternalDocs = v +} + +func (x *Swagger) SetExtensions(v map[string]*structpb.Value) { + x.Extensions = v +} + +func (x *Swagger) HasInfo() bool { + if x == nil { + return false + } + return x.Info != nil +} + +func (x *Swagger) HasSecurityDefinitions() bool { + if x == nil { + return false + } + return x.SecurityDefinitions != nil +} + +func (x *Swagger) HasExternalDocs() bool { + if x == nil { + return false + } + return x.ExternalDocs != nil +} + +func (x *Swagger) ClearInfo() { + x.Info = nil +} + +func (x *Swagger) ClearSecurityDefinitions() { + x.SecurityDefinitions = nil +} + +func (x *Swagger) ClearExternalDocs() { + x.ExternalDocs = nil +} + +type Swagger_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Specifies the OpenAPI Specification version being used. It can be + // used by the OpenAPI UI and other clients to interpret the API listing. The + // value MUST be "2.0". + Swagger string + // Provides metadata about the API. The metadata can be used by the + // clients if needed. + Info *Info + // The host (name or ip) serving the API. This MUST be the host only and does + // not include the scheme nor sub-paths. It MAY include a port. If the host is + // not included, the host serving the documentation is to be used (including + // the port). The host does not support path templating. + Host string + // The base path on which the API is served, which is relative to the host. If + // it is not included, the API is served directly under the host. The value + // MUST start with a leading slash (/). The basePath does not support path + // templating. 
+ // Note that using `base_path` does not change the endpoint paths that are + // generated in the resulting OpenAPI file. If you wish to use `base_path` + // with relatively generated OpenAPI paths, the `base_path` prefix must be + // manually removed from your `google.api.http` paths and your code changed to + // serve the API from the `base_path`. + BasePath string + // The transfer protocol of the API. Values MUST be from the list: "http", + // "https", "ws", "wss". If the schemes is not included, the default scheme to + // be used is the one used to access the OpenAPI definition itself. + Schemes []Scheme + // A list of MIME types the APIs can consume. This is global to all APIs but + // can be overridden on specific API calls. Value MUST be as described under + // Mime Types. + Consumes []string + // A list of MIME types the APIs can produce. This is global to all APIs but + // can be overridden on specific API calls. Value MUST be as described under + // Mime Types. + Produces []string + // An object to hold responses that can be used across operations. This + // property does not define global responses for all operations. + Responses map[string]*Response + // Security scheme definitions that can be used across the specification. + SecurityDefinitions *SecurityDefinitions + // A declaration of which security schemes are applied for the API as a whole. + // The list of values describes alternative security schemes that can be used + // (that is, there is a logical OR between the security requirements). + // Individual operations can override this definition. + Security []*SecurityRequirement + // A list of tags for API documentation control. Tags can be used for logical + // grouping of operations by resources or any other qualifier. + Tags []*Tag + // Additional external documentation. + ExternalDocs *ExternalDocumentation + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 Swagger_builder) Build() *Swagger { + m0 := &Swagger{} + b, x := &b0, m0 + _, _ = b, x + x.Swagger = b.Swagger + x.Info = b.Info + x.Host = b.Host + x.BasePath = b.BasePath + x.Schemes = b.Schemes + x.Consumes = b.Consumes + x.Produces = b.Produces + x.Responses = b.Responses + x.SecurityDefinitions = b.SecurityDefinitions + x.Security = b.Security + x.Tags = b.Tags + x.ExternalDocs = b.ExternalDocs + x.Extensions = b.Extensions + return m0 +} + // `Operation` is a representation of OpenAPI v2 specification's Operation object. // // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject @@ -592,10 +720,7 @@ func (x *Swagger) GetExtensions() map[string]*structpb.Value { // } // } type Operation struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"hybrid.v1"` // A list of tags for API documentation control. Tags can be used for logical // grouping of operations by resources or any other qualifier. Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"` @@ -622,7 +747,7 @@ type Operation struct { Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"` // The list of possible responses as they are returned from executing this // operation. 
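The Swagger_builder/Build pair above is the new construction path: populate the keyed builder struct, then call Build to obtain the message. A hedged sketch with hypothetical values:

    doc := options.Swagger_builder{
        Swagger:  "2.0",
        BasePath: "/api", // hypothetical
        Consumes: []string{"application/json"},
        Produces: []string{"application/json"},
    }.Build()
    doc.SetHost("example.com") // the plain setters remain usable after Build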
- Responses map[string]*Response `protobuf:"bytes,9,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Responses map[string]*Response `protobuf:"bytes,9,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // The transfer protocol for the operation. Values MUST be from the list: // "http", "https", "ws", "wss". The value overrides the OpenAPI Object // schemes definition. @@ -639,11 +764,13 @@ type Operation struct { // Custom properties that start with "x-" such as "x-foo" used to describe // extra functionality that is not covered by the standard OpenAPI Specification. // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ - Extensions map[string]*structpb.Value `protobuf:"bytes,13,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Extensions map[string]*structpb.Value `protobuf:"bytes,13,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // Custom parameters such as HTTP request headers. // See: https://swagger.io/docs/specification/2-0/describing-parameters/ // and https://swagger.io/specification/v2/#parameter-object. - Parameters *Parameters `protobuf:"bytes,14,opt,name=parameters,proto3" json:"parameters,omitempty"` + Parameters *Parameters `protobuf:"bytes,14,opt,name=parameters,proto3" json:"parameters,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Operation) Reset() { @@ -671,11 +798,6 @@ func (x *Operation) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Operation.ProtoReflect.Descriptor instead. 
-func (*Operation) Descriptor() ([]byte, []int) { - return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{1} -} - func (x *Operation) GetTags() []string { if x != nil { return x.Tags @@ -767,19 +889,165 @@ func (x *Operation) GetParameters() *Parameters { return nil } +func (x *Operation) SetTags(v []string) { + x.Tags = v +} + +func (x *Operation) SetSummary(v string) { + x.Summary = v +} + +func (x *Operation) SetDescription(v string) { + x.Description = v +} + +func (x *Operation) SetExternalDocs(v *ExternalDocumentation) { + x.ExternalDocs = v +} + +func (x *Operation) SetOperationId(v string) { + x.OperationId = v +} + +func (x *Operation) SetConsumes(v []string) { + x.Consumes = v +} + +func (x *Operation) SetProduces(v []string) { + x.Produces = v +} + +func (x *Operation) SetResponses(v map[string]*Response) { + x.Responses = v +} + +func (x *Operation) SetSchemes(v []Scheme) { + x.Schemes = v +} + +func (x *Operation) SetDeprecated(v bool) { + x.Deprecated = v +} + +func (x *Operation) SetSecurity(v []*SecurityRequirement) { + x.Security = v +} + +func (x *Operation) SetExtensions(v map[string]*structpb.Value) { + x.Extensions = v +} + +func (x *Operation) SetParameters(v *Parameters) { + x.Parameters = v +} + +func (x *Operation) HasExternalDocs() bool { + if x == nil { + return false + } + return x.ExternalDocs != nil +} + +func (x *Operation) HasParameters() bool { + if x == nil { + return false + } + return x.Parameters != nil +} + +func (x *Operation) ClearExternalDocs() { + x.ExternalDocs = nil +} + +func (x *Operation) ClearParameters() { + x.Parameters = nil +} + +type Operation_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // A list of tags for API documentation control. Tags can be used for logical + // grouping of operations by resources or any other qualifier. + Tags []string + // A short summary of what the operation does. For maximum readability in the + // swagger-ui, this field SHOULD be less than 120 characters. + Summary string + // A verbose explanation of the operation behavior. GFM syntax can be used for + // rich text representation. + Description string + // Additional external documentation for this operation. + ExternalDocs *ExternalDocumentation + // Unique string used to identify the operation. The id MUST be unique among + // all operations described in the API. Tools and libraries MAY use the + // operationId to uniquely identify an operation, therefore, it is recommended + // to follow common programming naming conventions. + OperationId string + // A list of MIME types the operation can consume. This overrides the consumes + // definition at the OpenAPI Object. An empty value MAY be used to clear the + // global definition. Value MUST be as described under Mime Types. + Consumes []string + // A list of MIME types the operation can produce. This overrides the produces + // definition at the OpenAPI Object. An empty value MAY be used to clear the + // global definition. Value MUST be as described under Mime Types. + Produces []string + // The list of possible responses as they are returned from executing this + // operation. + Responses map[string]*Response + // The transfer protocol for the operation. Values MUST be from the list: + // "http", "https", "ws", "wss". The value overrides the OpenAPI Object + // schemes definition. + Schemes []Scheme + // Declares this operation to be deprecated. Usage of the declared operation + // should be refrained. 
Default value is false. + Deprecated bool + // A declaration of which security schemes are applied for this operation. The + // list of values describes alternative security schemes that can be used + // (that is, there is a logical OR between the security requirements). This + // definition overrides any declared top-level security. To remove a top-level + // security declaration, an empty array can be used. + Security []*SecurityRequirement + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value + // Custom parameters such as HTTP request headers. + // See: https://swagger.io/docs/specification/2-0/describing-parameters/ + // and https://swagger.io/specification/v2/#parameter-object. + Parameters *Parameters +} + +func (b0 Operation_builder) Build() *Operation { + m0 := &Operation{} + b, x := &b0, m0 + _, _ = b, x + x.Tags = b.Tags + x.Summary = b.Summary + x.Description = b.Description + x.ExternalDocs = b.ExternalDocs + x.OperationId = b.OperationId + x.Consumes = b.Consumes + x.Produces = b.Produces + x.Responses = b.Responses + x.Schemes = b.Schemes + x.Deprecated = b.Deprecated + x.Security = b.Security + x.Extensions = b.Extensions + x.Parameters = b.Parameters + return m0 +} + // `Parameters` is a representation of OpenAPI v2 specification's parameters object. // Note: This technically breaks compatibility with the OpenAPI 2 definition structure as we only // allow header parameters to be set here since we do not want users specifying custom non-header // parameters beyond those inferred from the Protobuf schema. // See: https://swagger.io/specification/v2/#parameter-object type Parameters struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"hybrid.v1"` // `Headers` is one or more HTTP header parameter. // See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters - Headers []*HeaderParameter `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` + Headers []*HeaderParameter `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Parameters) Reset() { @@ -807,11 +1075,6 @@ func (x *Parameters) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Parameters.ProtoReflect.Descriptor instead. -func (*Parameters) Descriptor() ([]byte, []int) { - return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{2} -} - func (x *Parameters) GetHeaders() []*HeaderParameter { if x != nil { return x.Headers @@ -819,13 +1082,30 @@ func (x *Parameters) GetHeaders() []*HeaderParameter { return nil } +func (x *Parameters) SetHeaders(v []*HeaderParameter) { + x.Headers = v +} + +type Parameters_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `Headers` is one or more HTTP header parameter. + // See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters + Headers []*HeaderParameter +} + +func (b0 Parameters_builder) Build() *Parameters { + m0 := &Parameters{} + b, x := &b0, m0 + _, _ = b, x + x.Headers = b.Headers + return m0 +} + // `HeaderParameter` a HTTP header parameter. 
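Operations compose the same way; a minimal sketch (values hypothetical) using the Operation_builder above together with the Response_builder defined further down in this file:

    op := options.Operation_builder{
        Summary:     "List items", // hypothetical
        OperationId: "ListItems",
        Responses: map[string]*options.Response{
            "200": options.Response_builder{Description: "OK"}.Build(),
        },
    }.Build()
    op.SetDeprecated(true) // marks the operation deprecated after construction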
// See: https://swagger.io/specification/v2/#parameter-object type HeaderParameter struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"hybrid.v1"` // `Name` is the header name. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // `Description` is a short description of the header. @@ -836,7 +1116,9 @@ type HeaderParameter struct { // `Format` The extending format for the previously mentioned type. Format string `protobuf:"bytes,4,opt,name=format,proto3" json:"format,omitempty"` // `Required` indicates if the header is optional - Required bool `protobuf:"varint,5,opt,name=required,proto3" json:"required,omitempty"` + Required bool `protobuf:"varint,5,opt,name=required,proto3" json:"required,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *HeaderParameter) Reset() { @@ -864,11 +1146,6 @@ func (x *HeaderParameter) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use HeaderParameter.ProtoReflect.Descriptor instead. -func (*HeaderParameter) Descriptor() ([]byte, []int) { - return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{3} -} - func (x *HeaderParameter) GetName() string { if x != nil { return x.Name @@ -904,14 +1181,59 @@ func (x *HeaderParameter) GetRequired() bool { return false } +func (x *HeaderParameter) SetName(v string) { + x.Name = v +} + +func (x *HeaderParameter) SetDescription(v string) { + x.Description = v +} + +func (x *HeaderParameter) SetType(v HeaderParameter_Type) { + x.Type = v +} + +func (x *HeaderParameter) SetFormat(v string) { + x.Format = v +} + +func (x *HeaderParameter) SetRequired(v bool) { + x.Required = v +} + +type HeaderParameter_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `Name` is the header name. + Name string + // `Description` is a short description of the header. + Description string + // `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported. + // See: https://swagger.io/specification/v2/#parameterType. + Type HeaderParameter_Type + // `Format` The extending format for the previously mentioned type. + Format string + // `Required` indicates if the header is optional + Required bool +} + +func (b0 HeaderParameter_builder) Build() *HeaderParameter { + m0 := &HeaderParameter{} + b, x := &b0, m0 + _, _ = b, x + x.Name = b.Name + x.Description = b.Description + x.Type = b.Type + x.Format = b.Format + x.Required = b.Required + return m0 +} + // `Header` is a representation of OpenAPI v2 specification's Header object. // // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#headerObject type Header struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"hybrid.v1"` // `Description` is a short description of the header. Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` // The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported. @@ -923,7 +1245,9 @@ type Header struct { // Unlike JSON Schema this value MUST conform to the defined type for the header. 
Default string `protobuf:"bytes,6,opt,name=default,proto3" json:"default,omitempty"` // 'Pattern' See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3. - Pattern string `protobuf:"bytes,13,opt,name=pattern,proto3" json:"pattern,omitempty"` + Pattern string `protobuf:"bytes,13,opt,name=pattern,proto3" json:"pattern,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Header) Reset() { @@ -951,11 +1275,6 @@ func (x *Header) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Header.ProtoReflect.Descriptor instead. -func (*Header) Descriptor() ([]byte, []int) { - return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{4} -} - func (x *Header) GetDescription() string { if x != nil { return x.Description @@ -991,31 +1310,79 @@ func (x *Header) GetPattern() string { return "" } -// `Response` is a representation of OpenAPI v2 specification's Response object. -// -// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject -type Response struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *Header) SetDescription(v string) { + x.Description = v +} - // `Description` is a short description of the response. - // GFM syntax can be used for rich text representation. - Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` - // `Schema` optionally defines the structure of the response. - // If `Schema` is not provided, it means there is no content to the response. - Schema *Schema `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"` - // `Headers` A list of headers that are sent with the response. - // `Header` name is expected to be a string in the canonical format of the MIME header key - // See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey - Headers map[string]*Header `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // `Examples` gives per-mimetype response examples. - // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object - Examples map[string]string `protobuf:"bytes,4,rep,name=examples,proto3" json:"examples,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Custom properties that start with "x-" such as "x-foo" used to describe - // extra functionality that is not covered by the standard OpenAPI Specification. +func (x *Header) SetType(v string) { + x.Type = v +} + +func (x *Header) SetFormat(v string) { + x.Format = v +} + +func (x *Header) SetDefault(v string) { + x.Default = v +} + +func (x *Header) SetPattern(v string) { + x.Pattern = v +} + +type Header_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `Description` is a short description of the header. + Description string + // The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported. + Type string + // `Format` The extending format for the previously mentioned type. + Format string + // `Default` Declares the value of the header that the server will use if none is provided. + // See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2. 
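Custom header parameters plug into an operation through the Parameters and HeaderParameter messages above; a sketch assuming the STRING value of the HeaderParameter_Type enum (declared elsewhere in this file) and a hypothetical header name:

    params := options.Parameters_builder{
        Headers: []*options.HeaderParameter{
            options.HeaderParameter_builder{
                Name:     "X-Request-Id",                 // hypothetical
                Type:     options.HeaderParameter_STRING, // assumed enum value name
                Required: true,
            }.Build(),
        },
    }.Build()
    _ = params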
+ // Unlike JSON Schema this value MUST conform to the defined type for the header. + Default string + // 'Pattern' See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3. + Pattern string +} + +func (b0 Header_builder) Build() *Header { + m0 := &Header{} + b, x := &b0, m0 + _, _ = b, x + x.Description = b.Description + x.Type = b.Type + x.Format = b.Format + x.Default = b.Default + x.Pattern = b.Pattern + return m0 +} + +// `Response` is a representation of OpenAPI v2 specification's Response object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject +type Response struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `Description` is a short description of the response. + // GFM syntax can be used for rich text representation. + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // `Schema` optionally defines the structure of the response. + // If `Schema` is not provided, it means there is no content to the response. + Schema *Schema `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"` + // `Headers` A list of headers that are sent with the response. + // `Header` name is expected to be a string in the canonical format of the MIME header key + // See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey + Headers map[string]*Header `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // `Examples` gives per-mimetype response examples. + // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object + Examples map[string]string `protobuf:"bytes,4,rep,name=examples,proto3" json:"examples,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ - Extensions map[string]*structpb.Value `protobuf:"bytes,5,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Extensions map[string]*structpb.Value `protobuf:"bytes,5,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Response) Reset() { @@ -1043,11 +1410,6 @@ func (x *Response) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Response.ProtoReflect.Descriptor instead. 
-func (*Response) Descriptor() ([]byte, []int) { - return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{5} -} - func (x *Response) GetDescription() string { if x != nil { return x.Description @@ -1083,6 +1445,71 @@ func (x *Response) GetExtensions() map[string]*structpb.Value { return nil } +func (x *Response) SetDescription(v string) { + x.Description = v +} + +func (x *Response) SetSchema(v *Schema) { + x.Schema = v +} + +func (x *Response) SetHeaders(v map[string]*Header) { + x.Headers = v +} + +func (x *Response) SetExamples(v map[string]string) { + x.Examples = v +} + +func (x *Response) SetExtensions(v map[string]*structpb.Value) { + x.Extensions = v +} + +func (x *Response) HasSchema() bool { + if x == nil { + return false + } + return x.Schema != nil +} + +func (x *Response) ClearSchema() { + x.Schema = nil +} + +type Response_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `Description` is a short description of the response. + // GFM syntax can be used for rich text representation. + Description string + // `Schema` optionally defines the structure of the response. + // If `Schema` is not provided, it means there is no content to the response. + Schema *Schema + // `Headers` A list of headers that are sent with the response. + // `Header` name is expected to be a string in the canonical format of the MIME header key + // See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey + Headers map[string]*Header + // `Examples` gives per-mimetype response examples. + // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object + Examples map[string]string + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 Response_builder) Build() *Response { + m0 := &Response{} + b, x := &b0, m0 + _, _ = b, x + x.Description = b.Description + x.Schema = b.Schema + x.Headers = b.Headers + x.Examples = b.Examples + x.Extensions = b.Extensions + return m0 +} + // `Info` is a representation of OpenAPI v2 specification's Info object. // // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject @@ -1107,10 +1534,7 @@ func (x *Response) GetExtensions() map[string]*structpb.Value { // ... // }; type Info struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"hybrid.v1"` // The title of the application. Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` // A short description of the application. GFM syntax can be used for rich @@ -1128,7 +1552,9 @@ type Info struct { // Custom properties that start with "x-" such as "x-foo" used to describe // extra functionality that is not covered by the standard OpenAPI Specification. 
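Map-valued fields such as Headers and Examples are assigned wholesale, either in the builder literal or through the setter; a sketch for the Response message above, with hypothetical content:

    resp := options.Response_builder{
        Description: "Widget found", // hypothetical
        Examples:    map[string]string{"application/json": `{"id": 1}`},
    }.Build()
    resp.SetHeaders(map[string]*options.Header{
        "Cache-Control": options.Header_builder{Type: "string"}.Build(),
    })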
// See: https://swagger.io/docs/specification/2-0/swagger-extensions/ - Extensions map[string]*structpb.Value `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Extensions map[string]*structpb.Value `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Info) Reset() { @@ -1156,11 +1582,6 @@ func (x *Info) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Info.ProtoReflect.Descriptor instead. -func (*Info) Descriptor() ([]byte, []int) { - return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{6} -} - func (x *Info) GetTitle() string { if x != nil { return x.Title @@ -1210,6 +1631,93 @@ func (x *Info) GetExtensions() map[string]*structpb.Value { return nil } +func (x *Info) SetTitle(v string) { + x.Title = v +} + +func (x *Info) SetDescription(v string) { + x.Description = v +} + +func (x *Info) SetTermsOfService(v string) { + x.TermsOfService = v +} + +func (x *Info) SetContact(v *Contact) { + x.Contact = v +} + +func (x *Info) SetLicense(v *License) { + x.License = v +} + +func (x *Info) SetVersion(v string) { + x.Version = v +} + +func (x *Info) SetExtensions(v map[string]*structpb.Value) { + x.Extensions = v +} + +func (x *Info) HasContact() bool { + if x == nil { + return false + } + return x.Contact != nil +} + +func (x *Info) HasLicense() bool { + if x == nil { + return false + } + return x.License != nil +} + +func (x *Info) ClearContact() { + x.Contact = nil +} + +func (x *Info) ClearLicense() { + x.License = nil +} + +type Info_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The title of the application. + Title string + // A short description of the application. GFM syntax can be used for rich + // text representation. + Description string + // The Terms of Service for the API. + TermsOfService string + // The contact information for the exposed API. + Contact *Contact + // The license information for the exposed API. + License *License + // Provides the version of the application API (not to be confused + // with the specification version). + Version string + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 Info_builder) Build() *Info { + m0 := &Info{} + b, x := &b0, m0 + _, _ = b, x + x.Title = b.Title + x.Description = b.Description + x.TermsOfService = b.TermsOfService + x.Contact = b.Contact + x.License = b.License + x.Version = b.Version + x.Extensions = b.Extensions + return m0 +} + // `Contact` is a representation of OpenAPI v2 specification's Contact object. // // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject @@ -1229,10 +1737,7 @@ func (x *Info) GetExtensions() map[string]*structpb.Value { // ... // }; type Contact struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"hybrid.v1"` // The identifying name of the contact person/organization. 
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // The URL pointing to the contact information. MUST be in the format of a @@ -1240,7 +1745,9 @@ type Contact struct { Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` // The email address of the contact person/organization. MUST be in the format // of an email address. - Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` + Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Contact) Reset() { @@ -1268,11 +1775,6 @@ func (x *Contact) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Contact.ProtoReflect.Descriptor instead. -func (*Contact) Descriptor() ([]byte, []int) { - return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{7} -} - func (x *Contact) GetName() string { if x != nil { return x.Name @@ -1294,6 +1796,41 @@ func (x *Contact) GetEmail() string { return "" } +func (x *Contact) SetName(v string) { + x.Name = v +} + +func (x *Contact) SetUrl(v string) { + x.Url = v +} + +func (x *Contact) SetEmail(v string) { + x.Email = v +} + +type Contact_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The identifying name of the contact person/organization. + Name string + // The URL pointing to the contact information. MUST be in the format of a + // URL. + Url string + // The email address of the contact person/organization. MUST be in the format + // of an email address. + Email string +} + +func (b0 Contact_builder) Build() *Contact { + m0 := &Contact{} + b, x := &b0, m0 + _, _ = b, x + x.Name = b.Name + x.Url = b.Url + x.Email = b.Email + return m0 +} + // `License` is a representation of OpenAPI v2 specification's License object. // // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject @@ -1312,14 +1849,13 @@ func (x *Contact) GetEmail() string { // ... // }; type License struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"hybrid.v1"` // The license name used for the API. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // A URL to the license used for the API. MUST be in the format of a URL. - Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *License) Reset() { @@ -1347,11 +1883,6 @@ func (x *License) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use License.ProtoReflect.Descriptor instead. -func (*License) Descriptor() ([]byte, []int) { - return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{8} -} - func (x *License) GetName() string { if x != nil { return x.Name @@ -1366,6 +1897,32 @@ func (x *License) GetUrl() string { return "" } +func (x *License) SetName(v string) { + x.Name = v +} + +func (x *License) SetUrl(v string) { + x.Url = v +} + +type License_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The license name used for the API. + Name string + // A URL to the license used for the API. MUST be in the format of a URL. 
+ Url string +} + +func (b0 License_builder) Build() *License { + m0 := &License{} + b, x := &b0, m0 + _, _ = b, x + x.Name = b.Name + x.Url = b.Url + return m0 +} + // `ExternalDocumentation` is a representation of OpenAPI v2 specification's // ExternalDocumentation object. // @@ -1382,16 +1939,15 @@ func (x *License) GetUrl() string { // ... // }; type ExternalDocumentation struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"hybrid.v1"` // A short description of the target documentation. GFM syntax can be used for // rich text representation. Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` // The URL for the target documentation. Value MUST be in the format // of a URL. - Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ExternalDocumentation) Reset() { @@ -1419,11 +1975,6 @@ func (x *ExternalDocumentation) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ExternalDocumentation.ProtoReflect.Descriptor instead. -func (*ExternalDocumentation) Descriptor() ([]byte, []int) { - return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{9} -} - func (x *ExternalDocumentation) GetDescription() string { if x != nil { return x.Description @@ -1438,15 +1989,40 @@ func (x *ExternalDocumentation) GetUrl() string { return "" } +func (x *ExternalDocumentation) SetDescription(v string) { + x.Description = v +} + +func (x *ExternalDocumentation) SetUrl(v string) { + x.Url = v +} + +type ExternalDocumentation_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // A short description of the target documentation. GFM syntax can be used for + // rich text representation. + Description string + // The URL for the target documentation. Value MUST be in the format + // of a URL. + Url string +} + +func (b0 ExternalDocumentation_builder) Build() *ExternalDocumentation { + m0 := &ExternalDocumentation{} + b, x := &b0, m0 + _, _ = b, x + x.Description = b.Description + x.Url = b.Url + return m0 +} + // `Schema` is a representation of OpenAPI v2 specification's Schema object. // // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject type Schema struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - JsonSchema *JSONSchema `protobuf:"bytes,1,opt,name=json_schema,json=jsonSchema,proto3" json:"json_schema,omitempty"` + state protoimpl.MessageState `protogen:"hybrid.v1"` + JsonSchema *JSONSchema `protobuf:"bytes,1,opt,name=json_schema,json=jsonSchema,proto3" json:"json_schema,omitempty"` // Adds support for polymorphism. The discriminator is the schema property // name that is used to differentiate between other schema that inherit this // schema. The property name used MUST be defined at this schema and it MUST @@ -1463,7 +2039,9 @@ type Schema struct { ExternalDocs *ExternalDocumentation `protobuf:"bytes,5,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` // A free-form property to include an example of an instance for this schema in JSON. // This is copied verbatim to the output. 
- Example string `protobuf:"bytes,6,opt,name=example,proto3" json:"example,omitempty"` + Example string `protobuf:"bytes,6,opt,name=example,proto3" json:"example,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Schema) Reset() { @@ -1491,11 +2069,6 @@ func (x *Schema) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Schema.ProtoReflect.Descriptor instead. -func (*Schema) Descriptor() ([]byte, []int) { - return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{10} -} - func (x *Schema) GetJsonSchema() *JSONSchema { if x != nil { return x.JsonSchema @@ -1531,6 +2104,303 @@ func (x *Schema) GetExample() string { return "" } +func (x *Schema) SetJsonSchema(v *JSONSchema) { + x.JsonSchema = v +} + +func (x *Schema) SetDiscriminator(v string) { + x.Discriminator = v +} + +func (x *Schema) SetReadOnly(v bool) { + x.ReadOnly = v +} + +func (x *Schema) SetExternalDocs(v *ExternalDocumentation) { + x.ExternalDocs = v +} + +func (x *Schema) SetExample(v string) { + x.Example = v +} + +func (x *Schema) HasJsonSchema() bool { + if x == nil { + return false + } + return x.JsonSchema != nil +} + +func (x *Schema) HasExternalDocs() bool { + if x == nil { + return false + } + return x.ExternalDocs != nil +} + +func (x *Schema) ClearJsonSchema() { + x.JsonSchema = nil +} + +func (x *Schema) ClearExternalDocs() { + x.ExternalDocs = nil +} + +type Schema_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + JsonSchema *JSONSchema + // Adds support for polymorphism. The discriminator is the schema property + // name that is used to differentiate between other schema that inherit this + // schema. The property name used MUST be defined at this schema and it MUST + // be in the required property list. When used, the value MUST be the name of + // this schema or any schema that inherits it. + Discriminator string + // Relevant only for Schema "properties" definitions. Declares the property as + // "read only". This means that it MAY be sent as part of a response but MUST + // NOT be sent as part of the request. Properties marked as readOnly being + // true SHOULD NOT be in the required list of the defined schema. Default + // value is false. + ReadOnly bool + // Additional external documentation for this schema. + ExternalDocs *ExternalDocumentation + // A free-form property to include an example of an instance for this schema in JSON. + // This is copied verbatim to the output. + Example string +} + +func (b0 Schema_builder) Build() *Schema { + m0 := &Schema{} + b, x := &b0, m0 + _, _ = b, x + x.JsonSchema = b.JsonSchema + x.Discriminator = b.Discriminator + x.ReadOnly = b.ReadOnly + x.ExternalDocs = b.ExternalDocs + x.Example = b.Example + return m0 +} + +// `EnumSchema` is subset of fields from the OpenAPI v2 specification's Schema object. +// Only fields that are applicable to Enums are included +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum) = { +// ... +// title: "MyEnum"; +// description:"This is my nice enum"; +// example: "ZERO"; +// required: true; +// ... +// }; +type EnumSchema struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // A short description of the schema. 
+ Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ Default string `protobuf:"bytes,2,opt,name=default,proto3" json:"default,omitempty"`
+ // The title of the schema.
+ Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"`
+ Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"`
+ ReadOnly bool `protobuf:"varint,5,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+ // Additional external documentation for this schema.
+ ExternalDocs *ExternalDocumentation `protobuf:"bytes,6,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+ Example string `protobuf:"bytes,7,opt,name=example,proto3" json:"example,omitempty"`
+ // Ref is used to define an external reference to include in the message.
+ // This could be a fully qualified proto message reference, and that type must
+ // be imported into the protofile. If no message is identified, the Ref will
+ // be used verbatim in the output.
+ // For example:
+ //
+ // `ref: ".google.protobuf.Timestamp"`.
+ Ref string `protobuf:"bytes,8,opt,name=ref,proto3" json:"ref,omitempty"`
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *EnumSchema) Reset() {
+ *x = EnumSchema{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *EnumSchema) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EnumSchema) ProtoMessage() {}
+
+func (x *EnumSchema) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *EnumSchema) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *EnumSchema) GetDefault() string {
+ if x != nil {
+ return x.Default
+ }
+ return ""
+}
+
+func (x *EnumSchema) GetTitle() string {
+ if x != nil {
+ return x.Title
+ }
+ return ""
+}
+
+func (x *EnumSchema) GetRequired() bool {
+ if x != nil {
+ return x.Required
+ }
+ return false
+}
+
+func (x *EnumSchema) GetReadOnly() bool {
+ if x != nil {
+ return x.ReadOnly
+ }
+ return false
+}
+
+func (x *EnumSchema) GetExternalDocs() *ExternalDocumentation {
+ if x != nil {
+ return x.ExternalDocs
+ }
+ return nil
+}
+
+func (x *EnumSchema) GetExample() string {
+ if x != nil {
+ return x.Example
+ }
+ return ""
+}
+
+func (x *EnumSchema) GetRef() string {
+ if x != nil {
+ return x.Ref
+ }
+ return ""
+}
+
+func (x *EnumSchema) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+func (x *EnumSchema) SetDescription(v string) {
+ x.Description = v
+}
+
+func (x *EnumSchema) SetDefault(v string) {
+ x.Default = v
+}
+
+func (x *EnumSchema) SetTitle(v string) {
+ x.Title = v
+}
+
+func (x *EnumSchema) SetRequired(v bool) {
+ x.Required = v
+}
+
+func (x *EnumSchema) SetReadOnly(v bool) {
+ x.ReadOnly = v
+}
+
+func (x *EnumSchema) SetExternalDocs(v *ExternalDocumentation) {
+ x.ExternalDocs = v
+}
+
+func (x *EnumSchema) SetExample(v string) {
+ x.Example = v
+}
+
+func (x *EnumSchema) SetRef(v string) {
+ x.Ref = v
+}
+
+func (x *EnumSchema) SetExtensions(v map[string]*structpb.Value) {
+ x.Extensions = v
+}
+
+func (x *EnumSchema) HasExternalDocs() bool {
+ if x == nil {
+ return false
+ }
+ return x.ExternalDocs != nil
+}
+
+func (x *EnumSchema) ClearExternalDocs() {
+ x.ExternalDocs = nil
+}
+
+type EnumSchema_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // A short description of the schema.
+ Description string
+ Default string
+ // The title of the schema.
+ Title string
+ Required bool
+ ReadOnly bool
+ // Additional external documentation for this schema.
+ ExternalDocs *ExternalDocumentation
+ Example string
+ // Ref is used to define an external reference to include in the message.
+ // This could be a fully qualified proto message reference, and that type must
+ // be imported into the protofile. If no message is identified, the Ref will
+ // be used verbatim in the output.
+ // For example:
+ //
+ // `ref: ".google.protobuf.Timestamp"`.
+ Ref string
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 EnumSchema_builder) Build() *EnumSchema {
+ m0 := &EnumSchema{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Description = b.Description
+ x.Default = b.Default
+ x.Title = b.Title
+ x.Required = b.Required
+ x.ReadOnly = b.ReadOnly
+ x.ExternalDocs = b.ExternalDocs
+ x.Example = b.Example
+ x.Ref = b.Ref
+ x.Extensions = b.Extensions
+ return m0
+}
+
 // `JSONSchema` represents properties from JSON Schema taken, and as used, in
 // the OpenAPI v2 spec.
 //
 // This includes changes made by OpenAPI v2.
 //
 // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
 //
 // See also: https://cswr.github.io/JsonSchema/spec/basic_types/,
 // https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json
 //
 // Example:
 //
 // message SimpleMessage {
 //  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = {
 //   json_schema: {
 //    title: "SimpleMessage"
 //    description: "A simple message."
 //    required: ["id"]
 //   }
 //  };
 //
 //  // Id represents the message identifier.
 //  string id = 1; [
 //   (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
 //    description: "The unique identifier of the simple message."
 //   }];
 // }
 type JSONSchema struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
 // Ref is used to define an external reference to include in the message.
 // This could be a fully qualified proto message reference, and that type must
 // be imported into the protofile. If no message is identified, the Ref will
@@ -1611,12 +2478,14 @@ type JSONSchema struct {
 // Custom properties that start with "x-" such as "x-foo" used to describe
 // extra functionality that is not covered by the standard OpenAPI Specification.
// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
- Extensions map[string]*structpb.Value `protobuf:"bytes,48,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Extensions map[string]*structpb.Value `protobuf:"bytes,48,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}

func (x *JSONSchema) Reset() {
 *x = JSONSchema{}
- mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11]
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12]
 ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 ms.StoreMessageInfo(mi)
}
@@ -1628,7 +2497,7 @@ func (x *JSONSchema) String() string {
func (*JSONSchema) ProtoMessage() {}

func (x *JSONSchema) ProtoReflect() protoreflect.Message {
- mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11]
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12]
 if x != nil {
 ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 if ms.LoadMessageInfo() == nil {
@@ -1639,11 +2508,6 @@ func (x *JSONSchema) ProtoReflect() protoreflect.Message {
 return mi.MessageOf(x)
}

-// Deprecated: Use JSONSchema.ProtoReflect.Descriptor instead.
-func (*JSONSchema) Descriptor() ([]byte, []int) {
- return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{11}
-}
-
func (x *JSONSchema) GetRef() string {
 if x != nil {
 return x.Ref
@@ -1826,14 +2690,213 @@ func (x *JSONSchema) GetExtensions() map[string]*structpb.Value {
 return nil
}

+func (x *JSONSchema) SetRef(v string) {
+ x.Ref = v
+}
+
+func (x *JSONSchema) SetTitle(v string) {
+ x.Title = v
+}
+
+func (x *JSONSchema) SetDescription(v string) {
+ x.Description = v
+}
+
+func (x *JSONSchema) SetDefault(v string) {
+ x.Default = v
+}
+
+func (x *JSONSchema) SetReadOnly(v bool) {
+ x.ReadOnly = v
+}
+
+func (x *JSONSchema) SetExample(v string) {
+ x.Example = v
+}
+
+func (x *JSONSchema) SetMultipleOf(v float64) {
+ x.MultipleOf = v
+}
+
+func (x *JSONSchema) SetMaximum(v float64) {
+ x.Maximum = v
+}
+
+func (x *JSONSchema) SetExclusiveMaximum(v bool) {
+ x.ExclusiveMaximum = v
+}
+
+func (x *JSONSchema) SetMinimum(v float64) {
+ x.Minimum = v
+}
+
+func (x *JSONSchema) SetExclusiveMinimum(v bool) {
+ x.ExclusiveMinimum = v
+}
+
+func (x *JSONSchema) SetMaxLength(v uint64) {
+ x.MaxLength = v
+}
+
+func (x *JSONSchema) SetMinLength(v uint64) {
+ x.MinLength = v
+}
+
+func (x *JSONSchema) SetPattern(v string) {
+ x.Pattern = v
+}
+
+func (x *JSONSchema) SetMaxItems(v uint64) {
+ x.MaxItems = v
+}
+
+func (x *JSONSchema) SetMinItems(v uint64) {
+ x.MinItems = v
+}
+
+func (x *JSONSchema) SetUniqueItems(v bool) {
+ x.UniqueItems = v
+}
+
+func (x *JSONSchema) SetMaxProperties(v uint64) {
+ x.MaxProperties = v
+}
+
+func (x *JSONSchema) SetMinProperties(v uint64) {
+ x.MinProperties = v
+}
+
+func (x *JSONSchema) SetRequired(v []string) {
+ x.Required = v
+}
+
+func (x *JSONSchema) SetArray(v []string) {
+ x.Array = v
+}
+
+func (x *JSONSchema) SetType(v []JSONSchema_JSONSchemaSimpleTypes) {
+ x.Type = v
+}
+
+func (x *JSONSchema) SetFormat(v string) {
+ x.Format = v
+}
+
+func (x *JSONSchema) SetEnum(v []string) {
+ x.Enum = v
+}
+
+func (x *JSONSchema) SetFieldConfiguration(v *JSONSchema_FieldConfiguration) {
+ x.FieldConfiguration = v
+}
+
+func (x *JSONSchema) SetExtensions(v map[string]*structpb.Value) {
+ x.Extensions = v
+}
+
+func (x *JSONSchema) HasFieldConfiguration() bool {
+ if x == nil {
+ return false
+ }
+ return x.FieldConfiguration != nil
+}
+
+func (x *JSONSchema) ClearFieldConfiguration() {
+ x.FieldConfiguration = nil
+}
+
+type JSONSchema_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // Ref is used to define an external reference to include in the message.
+ // This could be a fully qualified proto message reference, and that type must
+ // be imported into the protofile. If no message is identified, the Ref will
+ // be used verbatim in the output.
+ // For example:
+ //
+ // `ref: ".google.protobuf.Timestamp"`.
+ Ref string
+ // The title of the schema.
+ Title string
+ // A short description of the schema.
+ Description string
+ Default string
+ ReadOnly bool
+ // A free-form property to include a JSON example of this field. This is copied
+ // verbatim to the output swagger.json. Quotes must be escaped.
+ // This property is the same for 2.0 and 3.0.0 https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+ Example string
+ MultipleOf float64
+ // Maximum represents an inclusive upper limit for a numeric instance. The
+ // value MUST be a number.
+ Maximum float64
+ ExclusiveMaximum bool
+ // minimum represents an inclusive lower limit for a numeric instance. The
+ // value MUST be a number.
+ Minimum float64
+ ExclusiveMinimum bool
+ MaxLength uint64
+ MinLength uint64
+ Pattern string
+ MaxItems uint64
+ MinItems uint64
+ UniqueItems bool
+ MaxProperties uint64
+ MinProperties uint64
+ Required []string
+ // Items in 'array' must be unique.
+ Array []string
+ Type []JSONSchema_JSONSchemaSimpleTypes
+ // `Format`
+ Format string
+ // Items in `enum` must be unique https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1
+ Enum []string
+ // Additional field level properties used when generating the OpenAPI v2 file.
+ FieldConfiguration *JSONSchema_FieldConfiguration
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 JSONSchema_builder) Build() *JSONSchema {
+ m0 := &JSONSchema{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Ref = b.Ref
+ x.Title = b.Title
+ x.Description = b.Description
+ x.Default = b.Default
+ x.ReadOnly = b.ReadOnly
+ x.Example = b.Example
+ x.MultipleOf = b.MultipleOf
+ x.Maximum = b.Maximum
+ x.ExclusiveMaximum = b.ExclusiveMaximum
+ x.Minimum = b.Minimum
+ x.ExclusiveMinimum = b.ExclusiveMinimum
+ x.MaxLength = b.MaxLength
+ x.MinLength = b.MinLength
+ x.Pattern = b.Pattern
+ x.MaxItems = b.MaxItems
+ x.MinItems = b.MinItems
+ x.UniqueItems = b.UniqueItems
+ x.MaxProperties = b.MaxProperties
+ x.MinProperties = b.MinProperties
+ x.Required = b.Required
+ x.Array = b.Array
+ x.Type = b.Type
+ x.Format = b.Format
+ x.Enum = b.Enum
+ x.FieldConfiguration = b.FieldConfiguration
+ x.Extensions = b.Extensions
+ return m0
+}
+
// `Tag` is a representation of OpenAPI v2 specification's Tag object.
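EnumSchema is new in this regeneration, which is why the msgTypes indices for JSONSchema and the messages after it shift by one in the hunks above. A hedged sketch of both builders, echoing the documentation examples where possible:

    es := options.EnumSchema_builder{
        Title:       "MyEnum", // mirrors the EnumSchema doc example
        Description: "This is my nice enum",
        Example:     "ZERO",
        Required:    true,
    }.Build()
    js := options.JSONSchema_builder{
        Title:    "SimpleMessage", // mirrors the JSONSchema doc example
        Required: []string{"id"},
        Pattern:  "^[0-9]+$", // hypothetical constraint
    }.Build()
    _, _ = es, js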
// // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject type Tag struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"hybrid.v1"` // The name of the tag. Use it to allow override of the name of a // global Tag object, then use that name to reference the tag throughout the // OpenAPI file. @@ -1846,12 +2909,14 @@ type Tag struct { // Custom properties that start with "x-" such as "x-foo" used to describe // extra functionality that is not covered by the standard OpenAPI Specification. // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ - Extensions map[string]*structpb.Value `protobuf:"bytes,4,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Extensions map[string]*structpb.Value `protobuf:"bytes,4,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Tag) Reset() { *x = Tag{} - mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12] + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1863,7 +2928,7 @@ func (x *Tag) String() string { func (*Tag) ProtoMessage() {} func (x *Tag) ProtoReflect() protoreflect.Message { - mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12] + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1874,11 +2939,6 @@ func (x *Tag) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Tag.ProtoReflect.Descriptor instead. -func (*Tag) Descriptor() ([]byte, []int) { - return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{12} -} - func (x *Tag) GetName() string { if x != nil { return x.Name @@ -1907,6 +2967,62 @@ func (x *Tag) GetExtensions() map[string]*structpb.Value { return nil } +func (x *Tag) SetName(v string) { + x.Name = v +} + +func (x *Tag) SetDescription(v string) { + x.Description = v +} + +func (x *Tag) SetExternalDocs(v *ExternalDocumentation) { + x.ExternalDocs = v +} + +func (x *Tag) SetExtensions(v map[string]*structpb.Value) { + x.Extensions = v +} + +func (x *Tag) HasExternalDocs() bool { + if x == nil { + return false + } + return x.ExternalDocs != nil +} + +func (x *Tag) ClearExternalDocs() { + x.ExternalDocs = nil +} + +type Tag_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The name of the tag. Use it to allow override of the name of a + // global Tag object, then use that name to reference the tag throughout the + // OpenAPI file. + Name string + // A short description for the tag. GFM syntax can be used for rich text + // representation. + Description string + // Additional external documentation for this tag. + ExternalDocs *ExternalDocumentation + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. 
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 Tag_builder) Build() *Tag { + m0 := &Tag{} + b, x := &b0, m0 + _, _ = b, x + x.Name = b.Name + x.Description = b.Description + x.ExternalDocs = b.ExternalDocs + x.Extensions = b.Extensions + return m0 +} + // `SecurityDefinitions` is a representation of OpenAPI v2 specification's // Security Definitions object. // @@ -1916,18 +3032,17 @@ func (x *Tag) GetExtensions() map[string]*structpb.Value { // specification. This does not enforce the security schemes on the operations // and only serves to provide the relevant details for each scheme. type SecurityDefinitions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"hybrid.v1"` // A single security scheme definition, mapping a "name" to the scheme it // defines. - Security map[string]*SecurityScheme `protobuf:"bytes,1,rep,name=security,proto3" json:"security,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Security map[string]*SecurityScheme `protobuf:"bytes,1,rep,name=security,proto3" json:"security,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SecurityDefinitions) Reset() { *x = SecurityDefinitions{} - mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13] + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1939,7 +3054,7 @@ func (x *SecurityDefinitions) String() string { func (*SecurityDefinitions) ProtoMessage() {} func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message { - mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13] + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1950,11 +3065,6 @@ func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SecurityDefinitions.ProtoReflect.Descriptor instead. -func (*SecurityDefinitions) Descriptor() ([]byte, []int) { - return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{13} -} - func (x *SecurityDefinitions) GetSecurity() map[string]*SecurityScheme { if x != nil { return x.Security @@ -1962,6 +3072,26 @@ func (x *SecurityDefinitions) GetSecurity() map[string]*SecurityScheme { return nil } +func (x *SecurityDefinitions) SetSecurity(v map[string]*SecurityScheme) { + x.Security = v +} + +type SecurityDefinitions_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // A single security scheme definition, mapping a "name" to the scheme it + // defines. + Security map[string]*SecurityScheme +} + +func (b0 SecurityDefinitions_builder) Build() *SecurityDefinitions { + m0 := &SecurityDefinitions{} + b, x := &b0, m0 + _, _ = b, x + x.Security = b.Security + return m0 +} + // `SecurityScheme` is a representation of OpenAPI v2 specification's // Security Scheme object. // @@ -1972,10 +3102,7 @@ func (x *SecurityDefinitions) GetSecurity() map[string]*SecurityScheme { // a header or as a query parameter) and OAuth2's common flows (implicit, // password, application and access code). 
type SecurityScheme struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"hybrid.v1"` // The type of the security scheme. Valid values are "basic", // "apiKey" or "oauth2". Type SecurityScheme_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Type" json:"type,omitempty"` @@ -2006,12 +3133,14 @@ type SecurityScheme struct { // Custom properties that start with "x-" such as "x-foo" used to describe // extra functionality that is not covered by the standard OpenAPI Specification. // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ - Extensions map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Extensions map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SecurityScheme) Reset() { *x = SecurityScheme{} - mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14] + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2023,7 +3152,7 @@ func (x *SecurityScheme) String() string { func (*SecurityScheme) ProtoMessage() {} func (x *SecurityScheme) ProtoReflect() protoreflect.Message { - mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14] + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2034,11 +3163,6 @@ func (x *SecurityScheme) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SecurityScheme.ProtoReflect.Descriptor instead. -func (*SecurityScheme) Descriptor() ([]byte, []int) { - return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{14} -} - func (x *SecurityScheme) GetType() SecurityScheme_Type { if x != nil { return x.Type @@ -2102,6 +3226,105 @@ func (x *SecurityScheme) GetExtensions() map[string]*structpb.Value { return nil } +func (x *SecurityScheme) SetType(v SecurityScheme_Type) { + x.Type = v +} + +func (x *SecurityScheme) SetDescription(v string) { + x.Description = v +} + +func (x *SecurityScheme) SetName(v string) { + x.Name = v +} + +func (x *SecurityScheme) SetIn(v SecurityScheme_In) { + x.In = v +} + +func (x *SecurityScheme) SetFlow(v SecurityScheme_Flow) { + x.Flow = v +} + +func (x *SecurityScheme) SetAuthorizationUrl(v string) { + x.AuthorizationUrl = v +} + +func (x *SecurityScheme) SetTokenUrl(v string) { + x.TokenUrl = v +} + +func (x *SecurityScheme) SetScopes(v *Scopes) { + x.Scopes = v +} + +func (x *SecurityScheme) SetExtensions(v map[string]*structpb.Value) { + x.Extensions = v +} + +func (x *SecurityScheme) HasScopes() bool { + if x == nil { + return false + } + return x.Scopes != nil +} + +func (x *SecurityScheme) ClearScopes() { + x.Scopes = nil +} + +type SecurityScheme_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The type of the security scheme. Valid values are "basic", + // "apiKey" or "oauth2". + Type SecurityScheme_Type + // A short description for security scheme. 
+ Description string + // The name of the header or query parameter to be used. + // Valid for apiKey. + Name string + // The location of the API key. Valid values are "query" or + // "header". + // Valid for apiKey. + In SecurityScheme_In + // The flow used by the OAuth2 security scheme. Valid values are + // "implicit", "password", "application" or "accessCode". + // Valid for oauth2. + Flow SecurityScheme_Flow + // The authorization URL to be used for this flow. This SHOULD be in + // the form of a URL. + // Valid for oauth2/implicit and oauth2/accessCode. + AuthorizationUrl string + // The token URL to be used for this flow. This SHOULD be in the + // form of a URL. + // Valid for oauth2/password, oauth2/application and oauth2/accessCode. + TokenUrl string + // The available scopes for the OAuth2 security scheme. + // Valid for oauth2. + Scopes *Scopes + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 SecurityScheme_builder) Build() *SecurityScheme { + m0 := &SecurityScheme{} + b, x := &b0, m0 + _, _ = b, x + x.Type = b.Type + x.Description = b.Description + x.Name = b.Name + x.In = b.In + x.Flow = b.Flow + x.AuthorizationUrl = b.AuthorizationUrl + x.TokenUrl = b.TokenUrl + x.Scopes = b.Scopes + x.Extensions = b.Extensions + return m0 +} + // `SecurityRequirement` is a representation of OpenAPI v2 specification's // Security Requirement object. // @@ -2114,20 +3337,19 @@ func (x *SecurityScheme) GetExtensions() map[string]*structpb.Value { // The name used for each property MUST correspond to a security scheme // declared in the Security Definitions. type SecurityRequirement struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"hybrid.v1"` // Each name must correspond to a security scheme which is declared in // the Security Definitions. If the security scheme is of type "oauth2", // then the value is a list of scope names required for the execution. // For other security scheme types, the array MUST be empty. 
- SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue `protobuf:"bytes,1,rep,name=security_requirement,json=securityRequirement,proto3" json:"security_requirement,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue `protobuf:"bytes,1,rep,name=security_requirement,json=securityRequirement,proto3" json:"security_requirement,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SecurityRequirement) Reset() { *x = SecurityRequirement{} - mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15] + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2139,7 +3361,7 @@ func (x *SecurityRequirement) String() string { func (*SecurityRequirement) ProtoMessage() {} func (x *SecurityRequirement) ProtoReflect() protoreflect.Message { - mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15] + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2150,11 +3372,6 @@ func (x *SecurityRequirement) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SecurityRequirement.ProtoReflect.Descriptor instead. -func (*SecurityRequirement) Descriptor() ([]byte, []int) { - return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{15} -} - func (x *SecurityRequirement) GetSecurityRequirement() map[string]*SecurityRequirement_SecurityRequirementValue { if x != nil { return x.SecurityRequirement @@ -2162,24 +3379,45 @@ func (x *SecurityRequirement) GetSecurityRequirement() map[string]*SecurityRequi return nil } +func (x *SecurityRequirement) SetSecurityRequirement(v map[string]*SecurityRequirement_SecurityRequirementValue) { + x.SecurityRequirement = v +} + +type SecurityRequirement_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Each name must correspond to a security scheme which is declared in + // the Security Definitions. If the security scheme is of type "oauth2", + // then the value is a list of scope names required for the execution. + // For other security scheme types, the array MUST be empty. + SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue +} + +func (b0 SecurityRequirement_builder) Build() *SecurityRequirement { + m0 := &SecurityRequirement{} + b, x := &b0, m0 + _, _ = b, x + x.SecurityRequirement = b.SecurityRequirement + return m0 +} + // `Scopes` is a representation of OpenAPI v2 specification's Scopes object. // // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject // // Lists the available scopes for an OAuth2 security scheme. type Scopes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"hybrid.v1"` // Maps between a name of a scope to a short description of it (as the value // of the property). 
- Scope map[string]string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Scope map[string]string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Scopes) Reset() { *x = Scopes{} - mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16] + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2191,7 +3429,7 @@ func (x *Scopes) String() string { func (*Scopes) ProtoMessage() {} func (x *Scopes) ProtoReflect() protoreflect.Message { - mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16] + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2202,11 +3440,6 @@ func (x *Scopes) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Scopes.ProtoReflect.Descriptor instead. -func (*Scopes) Descriptor() ([]byte, []int) { - return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{16} -} - func (x *Scopes) GetScope() map[string]string { if x != nil { return x.Scope @@ -2214,23 +3447,42 @@ func (x *Scopes) GetScope() map[string]string { return nil } +func (x *Scopes) SetScope(v map[string]string) { + x.Scope = v +} + +type Scopes_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Maps between a name of a scope to a short description of it (as the value + // of the property). + Scope map[string]string +} + +func (b0 Scopes_builder) Build() *Scopes { + m0 := &Scopes{} + b, x := &b0, m0 + _, _ = b, x + x.Scope = b.Scope + return m0 +} + // 'FieldConfiguration' provides additional field level properties used when generating the OpenAPI v2 file. // These properties are not defined by OpenAPIv2, but they are used to control the generation. type JSONSchema_FieldConfiguration struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"hybrid.v1"` // Alternative parameter name when used as path parameter. If set, this will // be used as the complete parameter name when this field is used as a path // parameter. Use this to avoid having auto generated path parameter names // for overlapping paths. 
PathParamName string `protobuf:"bytes,47,opt,name=path_param_name,json=pathParamName,proto3" json:"path_param_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *JSONSchema_FieldConfiguration) Reset() { *x = JSONSchema_FieldConfiguration{} - mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[25] + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2242,7 +3494,7 @@ func (x *JSONSchema_FieldConfiguration) String() string { func (*JSONSchema_FieldConfiguration) ProtoMessage() {} func (x *JSONSchema_FieldConfiguration) ProtoReflect() protoreflect.Message { - mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[25] + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2253,11 +3505,6 @@ func (x *JSONSchema_FieldConfiguration) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use JSONSchema_FieldConfiguration.ProtoReflect.Descriptor instead. -func (*JSONSchema_FieldConfiguration) Descriptor() ([]byte, []int) { - return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{11, 0} -} - func (x *JSONSchema_FieldConfiguration) GetPathParamName() string { if x != nil { return x.PathParamName @@ -2265,20 +3512,41 @@ func (x *JSONSchema_FieldConfiguration) GetPathParamName() string { return "" } +func (x *JSONSchema_FieldConfiguration) SetPathParamName(v string) { + x.PathParamName = v +} + +type JSONSchema_FieldConfiguration_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Alternative parameter name when used as path parameter. If set, this will + // be used as the complete parameter name when this field is used as a path + // parameter. Use this to avoid having auto generated path parameter names + // for overlapping paths. + PathParamName string +} + +func (b0 JSONSchema_FieldConfiguration_builder) Build() *JSONSchema_FieldConfiguration { + m0 := &JSONSchema_FieldConfiguration{} + b, x := &b0, m0 + _, _ = b, x + x.PathParamName = b.PathParamName + return m0 +} + // If the security scheme is of type "oauth2", then the value is a list of // scope names required for the execution. For other security scheme types, // the array MUST be empty. 
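+//
+// A hypothetical usage of the generated builder (the scope names below are
+// examples only):
+//
+//	v := SecurityRequirement_SecurityRequirementValue_builder{
+//		Scope: []string{"read:pets", "write:pets"},
+//	}.Build()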
type SecurityRequirement_SecurityRequirementValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"hybrid.v1"` + Scope []string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty"` unknownFields protoimpl.UnknownFields - - Scope []string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty"` + sizeCache protoimpl.SizeCache } func (x *SecurityRequirement_SecurityRequirementValue) Reset() { *x = SecurityRequirement_SecurityRequirementValue{} - mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[30] + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2290,7 +3558,7 @@ func (x *SecurityRequirement_SecurityRequirementValue) String() string { func (*SecurityRequirement_SecurityRequirementValue) ProtoMessage() {} func (x *SecurityRequirement_SecurityRequirementValue) ProtoReflect() protoreflect.Message { - mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[30] + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2301,11 +3569,6 @@ func (x *SecurityRequirement_SecurityRequirementValue) ProtoReflect() protorefle return mi.MessageOf(x) } -// Deprecated: Use SecurityRequirement_SecurityRequirementValue.ProtoReflect.Descriptor instead. -func (*SecurityRequirement_SecurityRequirementValue) Descriptor() ([]byte, []int) { - return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{15, 0} -} - func (x *SecurityRequirement_SecurityRequirementValue) GetScope() []string { if x != nil { return x.Scope @@ -2313,6 +3576,24 @@ func (x *SecurityRequirement_SecurityRequirementValue) GetScope() []string { return nil } +func (x *SecurityRequirement_SecurityRequirementValue) SetScope(v []string) { + x.Scope = v +} + +type SecurityRequirement_SecurityRequirementValue_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
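+	// Scope mirrors the message field above: the OAuth2 scope names required
+	// for execution; for other security scheme types it must stay empty.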
+ + Scope []string +} + +func (b0 SecurityRequirement_SecurityRequirementValue_builder) Build() *SecurityRequirement_SecurityRequirementValue { + m0 := &SecurityRequirement_SecurityRequirementValue{} + b, x := &b0, m0 + _, _ = b, x + x.Scope = b.Scope + return m0 +} + var File_protoc_gen_openapiv2_options_openapiv2_proto protoreflect.FileDescriptor var file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = []byte{ @@ -2593,244 +3874,263 @@ var file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = []byte{ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x4a, 0x04, 0x08, - 0x04, 0x10, 0x05, 0x22, 0xd7, 0x0a, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x72, 0x65, 0x66, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, - 0x6e, 0x6c, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, - 0x6e, 0x6c, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x1f, 0x0a, - 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x18, - 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, - 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, - 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, - 0x18, 0x0d, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, - 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, - 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, - 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, - 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, - 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, - 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, - 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, - 
0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, - 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x15, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, - 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x16, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, - 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, - 0x69, 0x65, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x50, 0x72, - 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x69, 0x6e, 0x5f, - 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, - 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x1a, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x61, - 0x72, 0x72, 0x61, 0x79, 0x18, 0x22, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x61, 0x72, 0x72, 0x61, - 0x79, 0x12, 0x5f, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x23, 0x20, 0x03, 0x28, 0x0e, 0x32, - 0x4b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x24, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x6e, - 0x75, 0x6d, 0x18, 0x2e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x7a, - 0x0a, 0x13, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, - 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x30, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, - 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x1a, 0x3c, 0x0a, 
0x12, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x61, 0x74, 0x68, 0x5f, - 0x70, 0x61, 0x72, 0x61, 0x6d, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0d, 0x70, 0x61, 0x74, 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x4e, 0x61, 0x6d, 0x65, 0x1a, - 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x77, 0x0a, 0x15, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, - 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, - 0x41, 0x52, 0x52, 0x41, 0x59, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45, - 0x41, 0x4e, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, - 0x03, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x55, 0x4c, 0x4c, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x4e, - 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x42, 0x4a, 0x45, 0x43, - 0x54, 0x10, 0x06, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x07, 0x4a, - 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x04, 0x10, - 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x4a, 0x04, 0x08, 0x13, 0x10, 0x14, 0x4a, 0x04, 0x08, - 0x17, 0x10, 0x18, 0x4a, 0x04, 0x08, 0x1b, 0x10, 0x1c, 0x4a, 0x04, 0x08, 0x1c, 0x10, 0x1d, 0x4a, - 0x04, 0x08, 0x1d, 0x10, 0x1e, 0x4a, 0x04, 0x08, 0x1e, 0x10, 0x22, 0x4a, 0x04, 0x08, 0x25, 0x10, - 0x2a, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x2b, 0x10, 0x2e, 0x22, 0xd9, 0x02, - 0x0a, 0x03, 0x54, 0x61, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65, - 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, - 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, - 0x63, 0x73, 0x12, 0x5e, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, - 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, - 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x2e, 0x45, 
0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf7, 0x01, 0x0a, 0x13, 0x53, 0x65, - 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x68, 0x0a, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, - 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x1a, 0x76, 0x0a, 0x0d, 0x53, - 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4f, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, - 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, - 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0xff, 0x06, 0x0a, 0x0e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x52, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x04, 0x10, 0x05, 0x22, 0xe8, 0x03, 0x0a, 0x0a, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, + 0x69, 0x74, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, + 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a, + 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 
0x63, 0x73, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x2e, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x4c, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3c, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, - 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, - 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x2e, 0x49, 0x6e, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x52, - 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, - 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, - 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x04, 0x66, 0x6c, - 0x6f, 0x77, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, - 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, - 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x49, 0x0a, 0x06, - 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, - 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, - 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x69, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 
0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4b, 0x0a, 0x04, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, - 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x53, 0x49, - 0x43, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x50, 0x49, 0x5f, - 0x4b, 0x45, 0x59, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x41, - 0x55, 0x54, 0x48, 0x32, 0x10, 0x03, 0x22, 0x31, 0x0a, 0x02, 0x49, 0x6e, 0x12, 0x0e, 0x0a, 0x0a, - 0x49, 0x4e, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, - 0x49, 0x4e, 0x5f, 0x51, 0x55, 0x45, 0x52, 0x59, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x49, 0x4e, - 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x22, 0x6a, 0x0a, 0x04, 0x46, 0x6c, 0x6f, - 0x77, 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, - 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4d, 0x50, 0x4c, - 0x49, 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x50, - 0x41, 0x53, 0x53, 0x57, 0x4f, 0x52, 0x44, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c, 0x4f, - 0x57, 0x5f, 0x41, 0x50, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x12, - 0x14, 0x0a, 0x10, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x43, - 0x4f, 0x44, 0x45, 0x10, 0x04, 0x22, 0xf6, 0x02, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, - 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x8a, 0x01, - 0x0a, 0x14, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, - 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, - 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, - 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x1a, 0x30, 0x0a, 0x18, 0x53, 0x65, - 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x9f, 0x01, 0x0a, - 0x18, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, + 0x2e, 0x45, 0x78, 0x74, 
0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x44, 0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x10, + 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, + 0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd7, + 0x0a, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x10, 0x0a, + 0x03, 0x72, 0x65, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12, + 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x18, + 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, + 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, + 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, + 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, + 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, + 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, + 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, + 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 
0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, + 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x78, + 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, + 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x14, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, + 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, + 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x25, 0x0a, 0x0e, + 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x18, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x69, 0x6e, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x18, + 0x22, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x12, 0x5f, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x23, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x4b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, - 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x96, - 0x01, 0x0a, 0x06, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x05, 0x73, 0x63, 0x6f, - 0x70, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x69, 0x6d, 0x70, + 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x2e, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x7a, 0x0a, 0x13, 
0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x12, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x30, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, + 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x12, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x61, 0x74, + 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x77, 0x0a, 0x15, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, + 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x52, 0x52, 0x41, 0x59, + 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45, 0x41, 0x4e, 0x10, 0x02, 0x12, + 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, + 0x4e, 0x55, 0x4c, 0x4c, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, + 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x10, 0x06, 0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, + 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, + 0x10, 0x13, 0x4a, 0x04, 0x08, 0x13, 0x10, 0x14, 0x4a, 0x04, 0x08, 0x17, 0x10, 0x18, 0x4a, 0x04, + 0x08, 0x1b, 0x10, 0x1c, 0x4a, 0x04, 0x08, 0x1c, 0x10, 0x1d, 0x4a, 0x04, 0x08, 0x1d, 0x10, 0x1e, + 0x4a, 0x04, 0x08, 0x1e, 0x10, 0x22, 0x4a, 0x04, 0x08, 0x25, 0x10, 0x2a, 0x4a, 0x04, 0x08, 0x2a, + 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x2b, 0x10, 0x2e, 0x22, 0xd9, 0x02, 0x0a, 0x03, 0x54, 0x61, 0x67, + 0x12, 
0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x5e, 0x0a, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61, + 0x67, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, + 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf7, 0x01, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x68, 0x0a, 0x08, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4c, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x73, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x1a, 0x76, 0x0a, 0x0d, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4f, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, - 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 
0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x38, 0x0a, - 0x0a, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0x3b, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, - 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, - 0x53, 0x10, 0x02, 0x12, 0x06, 0x0a, 0x02, 0x57, 0x53, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x57, - 0x53, 0x53, 0x10, 0x04, 0x42, 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73, 0x79, 0x73, 0x74, 0x65, - 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2f, 0x76, - 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescOnce sync.Once - file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescData = file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc -) - -func file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP() []byte { - file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescOnce.Do(func() { - file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescData = protoimpl.X.CompressGZIP(file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescData) - }) - return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescData + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xff, + 0x06, 0x0a, 0x0e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x65, 0x12, 0x52, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, + 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4c, 0x0a, 0x02, 0x69, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x65, 0x2e, 0x49, 0x6e, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x52, 0x0a, 0x04, 0x66, 0x6c, 0x6f, + 0x77, 0x18, 
0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x65, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x2b, 0x0a, + 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, + 0x72, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, + 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, + 0x65, 0x73, 0x12, 0x69, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, + 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4b, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0e, + 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x53, 0x49, 0x43, 0x10, 0x01, 0x12, 0x10, + 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x02, + 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x41, 0x55, 0x54, 0x48, 0x32, 0x10, + 0x03, 0x22, 0x31, 0x0a, 0x02, 0x49, 0x6e, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x4e, 0x5f, 0x49, 0x4e, + 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x5f, 0x51, 0x55, + 0x45, 0x52, 0x59, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x49, 0x4e, 0x5f, 0x48, 0x45, 0x41, 0x44, + 0x45, 0x52, 0x10, 0x02, 0x22, 0x6a, 0x0a, 0x04, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x10, 0x0a, 0x0c, + 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x11, + 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 
0x5f, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, + 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x50, 0x41, 0x53, 0x53, 0x57, 0x4f, + 0x52, 0x44, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x41, 0x50, 0x50, + 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c, + 0x4f, 0x57, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x04, + 0x22, 0xf6, 0x02, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x8a, 0x01, 0x0a, 0x14, 0x73, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x1a, 0x30, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x9f, 0x01, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75, + 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, + 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x96, 0x01, 0x0a, 0x06, 0x53, 0x63, + 0x6f, 0x70, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x38, 0x0a, 0x0a, 0x53, 0x63, 0x6f, 0x70, + 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 
0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x2a, 0x3b, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x0b, 0x0a, 0x07, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x54, 0x54, + 0x50, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x02, 0x12, 0x06, + 0x0a, 0x02, 0x57, 0x53, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x57, 0x53, 0x53, 0x10, 0x04, 0x42, + 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, + 0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70, + 0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes = make([]protoimpl.EnumInfo, 6) -var file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes = make([]protoimpl.MessageInfo, 33) +var file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes = make([]protoimpl.MessageInfo, 35) var file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = []any{ (Scheme)(0), // 0: grpc.gateway.protoc_gen_openapiv2.options.Scheme (HeaderParameter_Type)(0), // 1: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type @@ -2849,86 +4149,91 @@ var file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = []any{ (*License)(nil), // 14: grpc.gateway.protoc_gen_openapiv2.options.License (*ExternalDocumentation)(nil), // 15: grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation (*Schema)(nil), // 16: grpc.gateway.protoc_gen_openapiv2.options.Schema - (*JSONSchema)(nil), // 17: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema - (*Tag)(nil), // 18: grpc.gateway.protoc_gen_openapiv2.options.Tag - (*SecurityDefinitions)(nil), // 19: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions - (*SecurityScheme)(nil), // 20: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme - (*SecurityRequirement)(nil), // 21: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement - (*Scopes)(nil), // 22: grpc.gateway.protoc_gen_openapiv2.options.Scopes - nil, // 23: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry - nil, // 24: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry - nil, // 25: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry - nil, // 26: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry - nil, // 27: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry - nil, // 28: grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry - nil, // 29: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry - nil, // 30: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry - (*JSONSchema_FieldConfiguration)(nil), // 31: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration - nil, // 32: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry - nil, // 33: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry - nil, // 34: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry - nil, // 35: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry - 
(*SecurityRequirement_SecurityRequirementValue)(nil), // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue - nil, // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry - nil, // 38: grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry - (*structpb.Value)(nil), // 39: google.protobuf.Value + (*EnumSchema)(nil), // 17: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema + (*JSONSchema)(nil), // 18: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema + (*Tag)(nil), // 19: grpc.gateway.protoc_gen_openapiv2.options.Tag + (*SecurityDefinitions)(nil), // 20: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions + (*SecurityScheme)(nil), // 21: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme + (*SecurityRequirement)(nil), // 22: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement + (*Scopes)(nil), // 23: grpc.gateway.protoc_gen_openapiv2.options.Scopes + nil, // 24: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry + nil, // 25: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry + nil, // 26: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry + nil, // 27: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry + nil, // 28: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry + nil, // 29: grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry + nil, // 30: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry + nil, // 31: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry + nil, // 32: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry + (*JSONSchema_FieldConfiguration)(nil), // 33: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration + nil, // 34: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry + nil, // 35: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry + nil, // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry + nil, // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry + (*SecurityRequirement_SecurityRequirementValue)(nil), // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue + nil, // 39: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry + nil, // 40: grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry + (*structpb.Value)(nil), // 41: google.protobuf.Value } var file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = []int32{ 12, // 0: grpc.gateway.protoc_gen_openapiv2.options.Swagger.info:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info 0, // 1: grpc.gateway.protoc_gen_openapiv2.options.Swagger.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme - 23, // 2: grpc.gateway.protoc_gen_openapiv2.options.Swagger.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry - 19, // 3: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security_definitions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions - 21, // 4: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement - 18, // 5: grpc.gateway.protoc_gen_openapiv2.options.Swagger.tags:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag + 24, // 2: grpc.gateway.protoc_gen_openapiv2.options.Swagger.responses:type_name -> 
grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry + 20, // 3: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security_definitions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions + 22, // 4: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement + 19, // 5: grpc.gateway.protoc_gen_openapiv2.options.Swagger.tags:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag 15, // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation - 24, // 7: grpc.gateway.protoc_gen_openapiv2.options.Swagger.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry + 25, // 7: grpc.gateway.protoc_gen_openapiv2.options.Swagger.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry 15, // 8: grpc.gateway.protoc_gen_openapiv2.options.Operation.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation - 25, // 9: grpc.gateway.protoc_gen_openapiv2.options.Operation.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry + 26, // 9: grpc.gateway.protoc_gen_openapiv2.options.Operation.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry 0, // 10: grpc.gateway.protoc_gen_openapiv2.options.Operation.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme - 21, // 11: grpc.gateway.protoc_gen_openapiv2.options.Operation.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement - 26, // 12: grpc.gateway.protoc_gen_openapiv2.options.Operation.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry + 22, // 11: grpc.gateway.protoc_gen_openapiv2.options.Operation.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement + 27, // 12: grpc.gateway.protoc_gen_openapiv2.options.Operation.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry 8, // 13: grpc.gateway.protoc_gen_openapiv2.options.Operation.parameters:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Parameters 9, // 14: grpc.gateway.protoc_gen_openapiv2.options.Parameters.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter 1, // 15: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type 16, // 16: grpc.gateway.protoc_gen_openapiv2.options.Response.schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema - 27, // 17: grpc.gateway.protoc_gen_openapiv2.options.Response.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry - 28, // 18: grpc.gateway.protoc_gen_openapiv2.options.Response.examples:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry - 29, // 19: grpc.gateway.protoc_gen_openapiv2.options.Response.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry + 28, // 17: grpc.gateway.protoc_gen_openapiv2.options.Response.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry + 29, // 18: grpc.gateway.protoc_gen_openapiv2.options.Response.examples:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry + 30, // 19: 
grpc.gateway.protoc_gen_openapiv2.options.Response.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry 13, // 20: grpc.gateway.protoc_gen_openapiv2.options.Info.contact:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Contact 14, // 21: grpc.gateway.protoc_gen_openapiv2.options.Info.license:type_name -> grpc.gateway.protoc_gen_openapiv2.options.License - 30, // 22: grpc.gateway.protoc_gen_openapiv2.options.Info.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry - 17, // 23: grpc.gateway.protoc_gen_openapiv2.options.Schema.json_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema + 31, // 22: grpc.gateway.protoc_gen_openapiv2.options.Info.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry + 18, // 23: grpc.gateway.protoc_gen_openapiv2.options.Schema.json_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema 15, // 24: grpc.gateway.protoc_gen_openapiv2.options.Schema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation - 2, // 25: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes - 31, // 26: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.field_configuration:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration - 32, // 27: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry - 15, // 28: grpc.gateway.protoc_gen_openapiv2.options.Tag.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation - 33, // 29: grpc.gateway.protoc_gen_openapiv2.options.Tag.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry - 34, // 30: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry - 3, // 31: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type - 4, // 32: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.in:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In - 5, // 33: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.flow:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow - 22, // 34: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.scopes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes - 35, // 35: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry - 37, // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.security_requirement:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry - 38, // 37: grpc.gateway.protoc_gen_openapiv2.options.Scopes.scope:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry - 11, // 38: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response - 39, // 39: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry.value:type_name -> google.protobuf.Value - 11, // 40: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry.value:type_name -> 
grpc.gateway.protoc_gen_openapiv2.options.Response - 39, // 41: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry.value:type_name -> google.protobuf.Value - 10, // 42: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Header - 39, // 43: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry.value:type_name -> google.protobuf.Value - 39, // 44: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry.value:type_name -> google.protobuf.Value - 39, // 45: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value - 39, // 46: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry.value:type_name -> google.protobuf.Value - 20, // 47: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme - 39, // 48: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry.value:type_name -> google.protobuf.Value - 36, // 49: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue - 50, // [50:50] is the sub-list for method output_type - 50, // [50:50] is the sub-list for method input_type - 50, // [50:50] is the sub-list for extension type_name - 50, // [50:50] is the sub-list for extension extendee - 0, // [0:50] is the sub-list for field type_name + 15, // 25: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 32, // 26: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry + 2, // 27: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes + 33, // 28: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.field_configuration:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration + 34, // 29: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry + 15, // 30: grpc.gateway.protoc_gen_openapiv2.options.Tag.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 35, // 31: grpc.gateway.protoc_gen_openapiv2.options.Tag.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry + 36, // 32: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry + 3, // 33: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type + 4, // 34: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.in:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In + 5, // 35: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.flow:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow + 23, // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.scopes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes + 37, // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.extensions:type_name -> 
grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry + 39, // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.security_requirement:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry + 40, // 39: grpc.gateway.protoc_gen_openapiv2.options.Scopes.scope:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry + 11, // 40: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response + 41, // 41: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry.value:type_name -> google.protobuf.Value + 11, // 42: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response + 41, // 43: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry.value:type_name -> google.protobuf.Value + 10, // 44: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Header + 41, // 45: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry.value:type_name -> google.protobuf.Value + 41, // 46: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry.value:type_name -> google.protobuf.Value + 41, // 47: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value + 41, // 48: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value + 41, // 49: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry.value:type_name -> google.protobuf.Value + 21, // 50: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme + 41, // 51: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry.value:type_name -> google.protobuf.Value + 38, // 52: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue + 53, // [53:53] is the sub-list for method output_type + 53, // [53:53] is the sub-list for method input_type + 53, // [53:53] is the sub-list for extension type_name + 53, // [53:53] is the sub-list for extension extendee + 0, // [0:53] is the sub-list for field type_name } func init() { file_protoc_gen_openapiv2_options_openapiv2_proto_init() } @@ -2942,7 +4247,7 @@ func file_protoc_gen_openapiv2_options_openapiv2_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc, NumEnums: 6, - NumMessages: 33, + NumMessages: 35, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.proto index 88f9ffd008d..5313f0818ae 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.proto +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.proto @@ -443,6 +443,45 @@ message Schema { string example = 6; } +// `EnumSchema` is subset of fields from the OpenAPI v2 specification's Schema object. 
+// Only fields that are applicable to Enums are included
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum) = {
+// ...
+// title: "MyEnum";
+// description:"This is my nice enum";
+// example: "ZERO";
+// required: true;
+// ...
+// };
+//
+message EnumSchema {
+ // A short description of the schema.
+ string description = 1;
+ string default = 2;
+ // The title of the schema.
+ string title = 3;
+ bool required = 4;
+ bool read_only = 5;
+ // Additional external documentation for this schema.
+ ExternalDocumentation external_docs = 6;
+ string example = 7;
+ // Ref is used to define an external reference to include in the message.
+ // This could be a fully qualified proto message reference, and that type must
+ // be imported into the protofile. If no message is identified, the Ref will
+ // be used verbatim in the output.
+ // For example:
+ // `ref: ".google.protobuf.Timestamp"`.
+ string ref = 8;
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ map<string, google.protobuf.Value> extensions = 9;
+}
+
 // `JSONSchema` represents properties from JSON Schema taken, and as used, in
 // the OpenAPI v2 spec.
 //
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.swagger.json b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.swagger.json
deleted file mode 100644
index 97dd6459ddb..00000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.swagger.json
+++ /dev/null
@@ -1,44 +0,0 @@
-{
- "swagger": "2.0",
- "info": {
- "title": "protoc-gen-openapiv2/options/openapiv2.proto",
- "version": "version not set"
- },
- "consumes": [
- "application/json"
- ],
- "produces": [
- "application/json"
- ],
- "paths": {},
- "definitions": {
- "protobufAny": {
- "type": "object",
- "properties": {
- "@type": {
- "type": "string"
- }
- },
- "additionalProperties": {}
- },
- "rpcStatus": {
- "type": "object",
- "properties": {
- "code": {
- "type": "integer",
- "format": "int32"
- },
- "message": {
- "type": "string"
- },
- "details": {
- "type": "array",
- "items": {
- "type": "object",
- "$ref": "#/definitions/protobufAny"
- }
- }
- }
- }
- }
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2_protoopaque.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2_protoopaque.pb.go
new file mode 100644
index 00000000000..1f0e0c26916
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2_protoopaque.pb.go
@@ -0,0 +1,4055 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.0
+// protoc (unknown)
+// source: protoc-gen-openapiv2/options/openapiv2.proto
+
+//go:build protoopaque
+
+package options
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Scheme describes the schemes supported by the OpenAPI Swagger +// and Operation objects. +type Scheme int32 + +const ( + Scheme_UNKNOWN Scheme = 0 + Scheme_HTTP Scheme = 1 + Scheme_HTTPS Scheme = 2 + Scheme_WS Scheme = 3 + Scheme_WSS Scheme = 4 +) + +// Enum value maps for Scheme. +var ( + Scheme_name = map[int32]string{ + 0: "UNKNOWN", + 1: "HTTP", + 2: "HTTPS", + 3: "WS", + 4: "WSS", + } + Scheme_value = map[string]int32{ + "UNKNOWN": 0, + "HTTP": 1, + "HTTPS": 2, + "WS": 3, + "WSS": 4, + } +) + +func (x Scheme) Enum() *Scheme { + p := new(Scheme) + *p = x + return p +} + +func (x Scheme) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Scheme) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0].Descriptor() +} + +func (Scheme) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0] +} + +func (x Scheme) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// `Type` is a supported HTTP header type. +// See https://swagger.io/specification/v2/#parameterType. +type HeaderParameter_Type int32 + +const ( + HeaderParameter_UNKNOWN HeaderParameter_Type = 0 + HeaderParameter_STRING HeaderParameter_Type = 1 + HeaderParameter_NUMBER HeaderParameter_Type = 2 + HeaderParameter_INTEGER HeaderParameter_Type = 3 + HeaderParameter_BOOLEAN HeaderParameter_Type = 4 +) + +// Enum value maps for HeaderParameter_Type. +var ( + HeaderParameter_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "STRING", + 2: "NUMBER", + 3: "INTEGER", + 4: "BOOLEAN", + } + HeaderParameter_Type_value = map[string]int32{ + "UNKNOWN": 0, + "STRING": 1, + "NUMBER": 2, + "INTEGER": 3, + "BOOLEAN": 4, + } +) + +func (x HeaderParameter_Type) Enum() *HeaderParameter_Type { + p := new(HeaderParameter_Type) + *p = x + return p +} + +func (x HeaderParameter_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HeaderParameter_Type) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1].Descriptor() +} + +func (HeaderParameter_Type) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1] +} + +func (x HeaderParameter_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +type JSONSchema_JSONSchemaSimpleTypes int32 + +const ( + JSONSchema_UNKNOWN JSONSchema_JSONSchemaSimpleTypes = 0 + JSONSchema_ARRAY JSONSchema_JSONSchemaSimpleTypes = 1 + JSONSchema_BOOLEAN JSONSchema_JSONSchemaSimpleTypes = 2 + JSONSchema_INTEGER JSONSchema_JSONSchemaSimpleTypes = 3 + JSONSchema_NULL JSONSchema_JSONSchemaSimpleTypes = 4 + JSONSchema_NUMBER JSONSchema_JSONSchemaSimpleTypes = 5 + JSONSchema_OBJECT JSONSchema_JSONSchemaSimpleTypes = 6 + JSONSchema_STRING JSONSchema_JSONSchemaSimpleTypes = 7 +) + +// Enum value maps for JSONSchema_JSONSchemaSimpleTypes. 
+var ( + JSONSchema_JSONSchemaSimpleTypes_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ARRAY", + 2: "BOOLEAN", + 3: "INTEGER", + 4: "NULL", + 5: "NUMBER", + 6: "OBJECT", + 7: "STRING", + } + JSONSchema_JSONSchemaSimpleTypes_value = map[string]int32{ + "UNKNOWN": 0, + "ARRAY": 1, + "BOOLEAN": 2, + "INTEGER": 3, + "NULL": 4, + "NUMBER": 5, + "OBJECT": 6, + "STRING": 7, + } +) + +func (x JSONSchema_JSONSchemaSimpleTypes) Enum() *JSONSchema_JSONSchemaSimpleTypes { + p := new(JSONSchema_JSONSchemaSimpleTypes) + *p = x + return p +} + +func (x JSONSchema_JSONSchemaSimpleTypes) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (JSONSchema_JSONSchemaSimpleTypes) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2].Descriptor() +} + +func (JSONSchema_JSONSchemaSimpleTypes) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2] +} + +func (x JSONSchema_JSONSchemaSimpleTypes) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// The type of the security scheme. Valid values are "basic", +// "apiKey" or "oauth2". +type SecurityScheme_Type int32 + +const ( + SecurityScheme_TYPE_INVALID SecurityScheme_Type = 0 + SecurityScheme_TYPE_BASIC SecurityScheme_Type = 1 + SecurityScheme_TYPE_API_KEY SecurityScheme_Type = 2 + SecurityScheme_TYPE_OAUTH2 SecurityScheme_Type = 3 +) + +// Enum value maps for SecurityScheme_Type. +var ( + SecurityScheme_Type_name = map[int32]string{ + 0: "TYPE_INVALID", + 1: "TYPE_BASIC", + 2: "TYPE_API_KEY", + 3: "TYPE_OAUTH2", + } + SecurityScheme_Type_value = map[string]int32{ + "TYPE_INVALID": 0, + "TYPE_BASIC": 1, + "TYPE_API_KEY": 2, + "TYPE_OAUTH2": 3, + } +) + +func (x SecurityScheme_Type) Enum() *SecurityScheme_Type { + p := new(SecurityScheme_Type) + *p = x + return p +} + +func (x SecurityScheme_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SecurityScheme_Type) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3].Descriptor() +} + +func (SecurityScheme_Type) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3] +} + +func (x SecurityScheme_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// The location of the API key. Valid values are "query" or "header". +type SecurityScheme_In int32 + +const ( + SecurityScheme_IN_INVALID SecurityScheme_In = 0 + SecurityScheme_IN_QUERY SecurityScheme_In = 1 + SecurityScheme_IN_HEADER SecurityScheme_In = 2 +) + +// Enum value maps for SecurityScheme_In. 
+var ( + SecurityScheme_In_name = map[int32]string{ + 0: "IN_INVALID", + 1: "IN_QUERY", + 2: "IN_HEADER", + } + SecurityScheme_In_value = map[string]int32{ + "IN_INVALID": 0, + "IN_QUERY": 1, + "IN_HEADER": 2, + } +) + +func (x SecurityScheme_In) Enum() *SecurityScheme_In { + p := new(SecurityScheme_In) + *p = x + return p +} + +func (x SecurityScheme_In) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SecurityScheme_In) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4].Descriptor() +} + +func (SecurityScheme_In) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4] +} + +func (x SecurityScheme_In) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// The flow used by the OAuth2 security scheme. Valid values are +// "implicit", "password", "application" or "accessCode". +type SecurityScheme_Flow int32 + +const ( + SecurityScheme_FLOW_INVALID SecurityScheme_Flow = 0 + SecurityScheme_FLOW_IMPLICIT SecurityScheme_Flow = 1 + SecurityScheme_FLOW_PASSWORD SecurityScheme_Flow = 2 + SecurityScheme_FLOW_APPLICATION SecurityScheme_Flow = 3 + SecurityScheme_FLOW_ACCESS_CODE SecurityScheme_Flow = 4 +) + +// Enum value maps for SecurityScheme_Flow. +var ( + SecurityScheme_Flow_name = map[int32]string{ + 0: "FLOW_INVALID", + 1: "FLOW_IMPLICIT", + 2: "FLOW_PASSWORD", + 3: "FLOW_APPLICATION", + 4: "FLOW_ACCESS_CODE", + } + SecurityScheme_Flow_value = map[string]int32{ + "FLOW_INVALID": 0, + "FLOW_IMPLICIT": 1, + "FLOW_PASSWORD": 2, + "FLOW_APPLICATION": 3, + "FLOW_ACCESS_CODE": 4, + } +) + +func (x SecurityScheme_Flow) Enum() *SecurityScheme_Flow { + p := new(SecurityScheme_Flow) + *p = x + return p +} + +func (x SecurityScheme_Flow) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SecurityScheme_Flow) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5].Descriptor() +} + +func (SecurityScheme_Flow) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5] +} + +func (x SecurityScheme_Flow) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// `Swagger` is a representation of OpenAPI v2 specification's Swagger object. 
+// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// title: "Echo API"; +// version: "1.0"; +// description: ""; +// contact: { +// name: "gRPC-Gateway project"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// email: "none@example.com"; +// }; +// license: { +// name: "BSD 3-Clause License"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE"; +// }; +// }; +// schemes: HTTPS; +// consumes: "application/json"; +// produces: "application/json"; +// }; +type Swagger struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"` + xxx_hidden_Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` + xxx_hidden_Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"` + xxx_hidden_BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"` + xxx_hidden_Schemes []Scheme `protobuf:"varint,5,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"` + xxx_hidden_Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"` + xxx_hidden_Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"` + xxx_hidden_Responses map[string]*Response `protobuf:"bytes,10,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + xxx_hidden_SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,11,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"` + xxx_hidden_Security *[]*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` + xxx_hidden_Tags *[]*Tag `protobuf:"bytes,13,rep,name=tags,proto3" json:"tags,omitempty"` + xxx_hidden_ExternalDocs *ExternalDocumentation `protobuf:"bytes,14,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,15,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Swagger) Reset() { + *x = Swagger{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Swagger) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Swagger) ProtoMessage() {} + +func (x *Swagger) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Swagger) GetSwagger() string { + if x != nil { + return x.xxx_hidden_Swagger + } + return "" +} + +func (x *Swagger) GetInfo() *Info { + if x != nil { + return x.xxx_hidden_Info + } + return nil +} + +func (x *Swagger) GetHost() string { + if x != nil { + return x.xxx_hidden_Host + } + return "" +} + +func (x *Swagger) GetBasePath() string { + if x != nil { + return x.xxx_hidden_BasePath + } + return "" +} 
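// Editor's note (not part of the upstream patch): a minimal sketch of how the
// opaque accessors generated above behave. The nil-safe getter behavior comes
// straight from the functions in this file; the `options` import alias and all
// field values are hypothetical, and Swagger_builder is the builder type
// defined further down in this same file.
//
//	var sw *options.Swagger // a nil message
//	_ = sw.GetHost()        // safe: getters return zero values instead of panicking
//
//	// Under the protoopaque build tag the struct fields are hidden, so
//	// messages are constructed through builders and mutated through setters:
//	sw = options.Swagger_builder{
//		Swagger:  "2.0",
//		Host:     "api.example.com", // hypothetical
//		BasePath: "/v1",             // hypothetical
//	}.Build()
//	sw.SetHost("other.example.com")
//	_ = sw.HasInfo() // false: Info was never set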
+ +func (x *Swagger) GetSchemes() []Scheme { + if x != nil { + return x.xxx_hidden_Schemes + } + return nil +} + +func (x *Swagger) GetConsumes() []string { + if x != nil { + return x.xxx_hidden_Consumes + } + return nil +} + +func (x *Swagger) GetProduces() []string { + if x != nil { + return x.xxx_hidden_Produces + } + return nil +} + +func (x *Swagger) GetResponses() map[string]*Response { + if x != nil { + return x.xxx_hidden_Responses + } + return nil +} + +func (x *Swagger) GetSecurityDefinitions() *SecurityDefinitions { + if x != nil { + return x.xxx_hidden_SecurityDefinitions + } + return nil +} + +func (x *Swagger) GetSecurity() []*SecurityRequirement { + if x != nil { + if x.xxx_hidden_Security != nil { + return *x.xxx_hidden_Security + } + } + return nil +} + +func (x *Swagger) GetTags() []*Tag { + if x != nil { + if x.xxx_hidden_Tags != nil { + return *x.xxx_hidden_Tags + } + } + return nil +} + +func (x *Swagger) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.xxx_hidden_ExternalDocs + } + return nil +} + +func (x *Swagger) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.xxx_hidden_Extensions + } + return nil +} + +func (x *Swagger) SetSwagger(v string) { + x.xxx_hidden_Swagger = v +} + +func (x *Swagger) SetInfo(v *Info) { + x.xxx_hidden_Info = v +} + +func (x *Swagger) SetHost(v string) { + x.xxx_hidden_Host = v +} + +func (x *Swagger) SetBasePath(v string) { + x.xxx_hidden_BasePath = v +} + +func (x *Swagger) SetSchemes(v []Scheme) { + x.xxx_hidden_Schemes = v +} + +func (x *Swagger) SetConsumes(v []string) { + x.xxx_hidden_Consumes = v +} + +func (x *Swagger) SetProduces(v []string) { + x.xxx_hidden_Produces = v +} + +func (x *Swagger) SetResponses(v map[string]*Response) { + x.xxx_hidden_Responses = v +} + +func (x *Swagger) SetSecurityDefinitions(v *SecurityDefinitions) { + x.xxx_hidden_SecurityDefinitions = v +} + +func (x *Swagger) SetSecurity(v []*SecurityRequirement) { + x.xxx_hidden_Security = &v +} + +func (x *Swagger) SetTags(v []*Tag) { + x.xxx_hidden_Tags = &v +} + +func (x *Swagger) SetExternalDocs(v *ExternalDocumentation) { + x.xxx_hidden_ExternalDocs = v +} + +func (x *Swagger) SetExtensions(v map[string]*structpb.Value) { + x.xxx_hidden_Extensions = v +} + +func (x *Swagger) HasInfo() bool { + if x == nil { + return false + } + return x.xxx_hidden_Info != nil +} + +func (x *Swagger) HasSecurityDefinitions() bool { + if x == nil { + return false + } + return x.xxx_hidden_SecurityDefinitions != nil +} + +func (x *Swagger) HasExternalDocs() bool { + if x == nil { + return false + } + return x.xxx_hidden_ExternalDocs != nil +} + +func (x *Swagger) ClearInfo() { + x.xxx_hidden_Info = nil +} + +func (x *Swagger) ClearSecurityDefinitions() { + x.xxx_hidden_SecurityDefinitions = nil +} + +func (x *Swagger) ClearExternalDocs() { + x.xxx_hidden_ExternalDocs = nil +} + +type Swagger_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Specifies the OpenAPI Specification version being used. It can be + // used by the OpenAPI UI and other clients to interpret the API listing. The + // value MUST be "2.0". + Swagger string + // Provides metadata about the API. The metadata can be used by the + // clients if needed. + Info *Info + // The host (name or ip) serving the API. This MUST be the host only and does + // not include the scheme nor sub-paths. It MAY include a port. 
If the host is + // not included, the host serving the documentation is to be used (including + // the port). The host does not support path templating. + Host string + // The base path on which the API is served, which is relative to the host. If + // it is not included, the API is served directly under the host. The value + // MUST start with a leading slash (/). The basePath does not support path + // templating. + // Note that using `base_path` does not change the endpoint paths that are + // generated in the resulting OpenAPI file. If you wish to use `base_path` + // with relatively generated OpenAPI paths, the `base_path` prefix must be + // manually removed from your `google.api.http` paths and your code changed to + // serve the API from the `base_path`. + BasePath string + // The transfer protocol of the API. Values MUST be from the list: "http", + // "https", "ws", "wss". If the schemes is not included, the default scheme to + // be used is the one used to access the OpenAPI definition itself. + Schemes []Scheme + // A list of MIME types the APIs can consume. This is global to all APIs but + // can be overridden on specific API calls. Value MUST be as described under + // Mime Types. + Consumes []string + // A list of MIME types the APIs can produce. This is global to all APIs but + // can be overridden on specific API calls. Value MUST be as described under + // Mime Types. + Produces []string + // An object to hold responses that can be used across operations. This + // property does not define global responses for all operations. + Responses map[string]*Response + // Security scheme definitions that can be used across the specification. + SecurityDefinitions *SecurityDefinitions + // A declaration of which security schemes are applied for the API as a whole. + // The list of values describes alternative security schemes that can be used + // (that is, there is a logical OR between the security requirements). + // Individual operations can override this definition. + Security []*SecurityRequirement + // A list of tags for API documentation control. Tags can be used for logical + // grouping of operations by resources or any other qualifier. + Tags []*Tag + // Additional external documentation. + ExternalDocs *ExternalDocumentation + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 Swagger_builder) Build() *Swagger { + m0 := &Swagger{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Swagger = b.Swagger + x.xxx_hidden_Info = b.Info + x.xxx_hidden_Host = b.Host + x.xxx_hidden_BasePath = b.BasePath + x.xxx_hidden_Schemes = b.Schemes + x.xxx_hidden_Consumes = b.Consumes + x.xxx_hidden_Produces = b.Produces + x.xxx_hidden_Responses = b.Responses + x.xxx_hidden_SecurityDefinitions = b.SecurityDefinitions + x.xxx_hidden_Security = &b.Security + x.xxx_hidden_Tags = &b.Tags + x.xxx_hidden_ExternalDocs = b.ExternalDocs + x.xxx_hidden_Extensions = b.Extensions + return m0 +} + +// `Operation` is a representation of OpenAPI v2 specification's Operation object. 
+// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject +// +// Example: +// +// service EchoService { +// rpc Echo(SimpleMessage) returns (SimpleMessage) { +// option (google.api.http) = { +// get: "/v1/example/echo/{id}" +// }; +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { +// summary: "Get a message."; +// operation_id: "getMessage"; +// tags: "echo"; +// responses: { +// key: "200" +// value: { +// description: "OK"; +// } +// } +// }; +// } +// } +type Operation struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"` + xxx_hidden_Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` + xxx_hidden_Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_ExternalDocs *ExternalDocumentation `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + xxx_hidden_OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` + xxx_hidden_Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"` + xxx_hidden_Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"` + xxx_hidden_Responses map[string]*Response `protobuf:"bytes,9,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + xxx_hidden_Schemes []Scheme `protobuf:"varint,10,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"` + xxx_hidden_Deprecated bool `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"` + xxx_hidden_Security *[]*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` + xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,13,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + xxx_hidden_Parameters *Parameters `protobuf:"bytes,14,opt,name=parameters,proto3" json:"parameters,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Operation) Reset() { + *x = Operation{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Operation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Operation) ProtoMessage() {} + +func (x *Operation) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Operation) GetTags() []string { + if x != nil { + return x.xxx_hidden_Tags + } + return nil +} + +func (x *Operation) GetSummary() string { + if x != nil { + return x.xxx_hidden_Summary + } + return "" +} + +func (x *Operation) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *Operation) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.xxx_hidden_ExternalDocs + } + return nil +} + +func (x *Operation) GetOperationId() string { + if x != nil { 
+ return x.xxx_hidden_OperationId + } + return "" +} + +func (x *Operation) GetConsumes() []string { + if x != nil { + return x.xxx_hidden_Consumes + } + return nil +} + +func (x *Operation) GetProduces() []string { + if x != nil { + return x.xxx_hidden_Produces + } + return nil +} + +func (x *Operation) GetResponses() map[string]*Response { + if x != nil { + return x.xxx_hidden_Responses + } + return nil +} + +func (x *Operation) GetSchemes() []Scheme { + if x != nil { + return x.xxx_hidden_Schemes + } + return nil +} + +func (x *Operation) GetDeprecated() bool { + if x != nil { + return x.xxx_hidden_Deprecated + } + return false +} + +func (x *Operation) GetSecurity() []*SecurityRequirement { + if x != nil { + if x.xxx_hidden_Security != nil { + return *x.xxx_hidden_Security + } + } + return nil +} + +func (x *Operation) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.xxx_hidden_Extensions + } + return nil +} + +func (x *Operation) GetParameters() *Parameters { + if x != nil { + return x.xxx_hidden_Parameters + } + return nil +} + +func (x *Operation) SetTags(v []string) { + x.xxx_hidden_Tags = v +} + +func (x *Operation) SetSummary(v string) { + x.xxx_hidden_Summary = v +} + +func (x *Operation) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *Operation) SetExternalDocs(v *ExternalDocumentation) { + x.xxx_hidden_ExternalDocs = v +} + +func (x *Operation) SetOperationId(v string) { + x.xxx_hidden_OperationId = v +} + +func (x *Operation) SetConsumes(v []string) { + x.xxx_hidden_Consumes = v +} + +func (x *Operation) SetProduces(v []string) { + x.xxx_hidden_Produces = v +} + +func (x *Operation) SetResponses(v map[string]*Response) { + x.xxx_hidden_Responses = v +} + +func (x *Operation) SetSchemes(v []Scheme) { + x.xxx_hidden_Schemes = v +} + +func (x *Operation) SetDeprecated(v bool) { + x.xxx_hidden_Deprecated = v +} + +func (x *Operation) SetSecurity(v []*SecurityRequirement) { + x.xxx_hidden_Security = &v +} + +func (x *Operation) SetExtensions(v map[string]*structpb.Value) { + x.xxx_hidden_Extensions = v +} + +func (x *Operation) SetParameters(v *Parameters) { + x.xxx_hidden_Parameters = v +} + +func (x *Operation) HasExternalDocs() bool { + if x == nil { + return false + } + return x.xxx_hidden_ExternalDocs != nil +} + +func (x *Operation) HasParameters() bool { + if x == nil { + return false + } + return x.xxx_hidden_Parameters != nil +} + +func (x *Operation) ClearExternalDocs() { + x.xxx_hidden_ExternalDocs = nil +} + +func (x *Operation) ClearParameters() { + x.xxx_hidden_Parameters = nil +} + +type Operation_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // A list of tags for API documentation control. Tags can be used for logical + // grouping of operations by resources or any other qualifier. + Tags []string + // A short summary of what the operation does. For maximum readability in the + // swagger-ui, this field SHOULD be less than 120 characters. + Summary string + // A verbose explanation of the operation behavior. GFM syntax can be used for + // rich text representation. + Description string + // Additional external documentation for this operation. + ExternalDocs *ExternalDocumentation + // Unique string used to identify the operation. The id MUST be unique among + // all operations described in the API. 
Tools and libraries MAY use the + // operationId to uniquely identify an operation, therefore, it is recommended + // to follow common programming naming conventions. + OperationId string + // A list of MIME types the operation can consume. This overrides the consumes + // definition at the OpenAPI Object. An empty value MAY be used to clear the + // global definition. Value MUST be as described under Mime Types. + Consumes []string + // A list of MIME types the operation can produce. This overrides the produces + // definition at the OpenAPI Object. An empty value MAY be used to clear the + // global definition. Value MUST be as described under Mime Types. + Produces []string + // The list of possible responses as they are returned from executing this + // operation. + Responses map[string]*Response + // The transfer protocol for the operation. Values MUST be from the list: + // "http", "https", "ws", "wss". The value overrides the OpenAPI Object + // schemes definition. + Schemes []Scheme + // Declares this operation to be deprecated. Usage of the declared operation + // should be refrained. Default value is false. + Deprecated bool + // A declaration of which security schemes are applied for this operation. The + // list of values describes alternative security schemes that can be used + // (that is, there is a logical OR between the security requirements). This + // definition overrides any declared top-level security. To remove a top-level + // security declaration, an empty array can be used. + Security []*SecurityRequirement + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value + // Custom parameters such as HTTP request headers. + // See: https://swagger.io/docs/specification/2-0/describing-parameters/ + // and https://swagger.io/specification/v2/#parameter-object. + Parameters *Parameters +} + +func (b0 Operation_builder) Build() *Operation { + m0 := &Operation{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Tags = b.Tags + x.xxx_hidden_Summary = b.Summary + x.xxx_hidden_Description = b.Description + x.xxx_hidden_ExternalDocs = b.ExternalDocs + x.xxx_hidden_OperationId = b.OperationId + x.xxx_hidden_Consumes = b.Consumes + x.xxx_hidden_Produces = b.Produces + x.xxx_hidden_Responses = b.Responses + x.xxx_hidden_Schemes = b.Schemes + x.xxx_hidden_Deprecated = b.Deprecated + x.xxx_hidden_Security = &b.Security + x.xxx_hidden_Extensions = b.Extensions + x.xxx_hidden_Parameters = b.Parameters + return m0 +} + +// `Parameters` is a representation of OpenAPI v2 specification's parameters object. +// Note: This technically breaks compatibility with the OpenAPI 2 definition structure as we only +// allow header parameters to be set here since we do not want users specifying custom non-header +// parameters beyond those inferred from the Protobuf schema. 
+// See: https://swagger.io/specification/v2/#parameter-object +type Parameters struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Headers *[]*HeaderParameter `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Parameters) Reset() { + *x = Parameters{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Parameters) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Parameters) ProtoMessage() {} + +func (x *Parameters) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Parameters) GetHeaders() []*HeaderParameter { + if x != nil { + if x.xxx_hidden_Headers != nil { + return *x.xxx_hidden_Headers + } + } + return nil +} + +func (x *Parameters) SetHeaders(v []*HeaderParameter) { + x.xxx_hidden_Headers = &v +} + +type Parameters_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `Headers` is one or more HTTP header parameter. + // See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters + Headers []*HeaderParameter +} + +func (b0 Parameters_builder) Build() *Parameters { + m0 := &Parameters{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Headers = &b.Headers + return m0 +} + +// `HeaderParameter` a HTTP header parameter. +// See: https://swagger.io/specification/v2/#parameter-object +type HeaderParameter struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + xxx_hidden_Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_Type HeaderParameter_Type `protobuf:"varint,3,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter_Type" json:"type,omitempty"` + xxx_hidden_Format string `protobuf:"bytes,4,opt,name=format,proto3" json:"format,omitempty"` + xxx_hidden_Required bool `protobuf:"varint,5,opt,name=required,proto3" json:"required,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HeaderParameter) Reset() { + *x = HeaderParameter{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HeaderParameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderParameter) ProtoMessage() {} + +func (x *HeaderParameter) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *HeaderParameter) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *HeaderParameter) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *HeaderParameter) GetType() HeaderParameter_Type { + if x != nil { + return x.xxx_hidden_Type + } + 
return HeaderParameter_UNKNOWN +} + +func (x *HeaderParameter) GetFormat() string { + if x != nil { + return x.xxx_hidden_Format + } + return "" +} + +func (x *HeaderParameter) GetRequired() bool { + if x != nil { + return x.xxx_hidden_Required + } + return false +} + +func (x *HeaderParameter) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *HeaderParameter) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *HeaderParameter) SetType(v HeaderParameter_Type) { + x.xxx_hidden_Type = v +} + +func (x *HeaderParameter) SetFormat(v string) { + x.xxx_hidden_Format = v +} + +func (x *HeaderParameter) SetRequired(v bool) { + x.xxx_hidden_Required = v +} + +type HeaderParameter_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `Name` is the header name. + Name string + // `Description` is a short description of the header. + Description string + // `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported. + // See: https://swagger.io/specification/v2/#parameterType. + Type HeaderParameter_Type + // `Format` is the extending format for the previously mentioned type. + Format string + // `Required` indicates whether the header is required. + Required bool +} + +func (b0 HeaderParameter_builder) Build() *HeaderParameter { + m0 := &HeaderParameter{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Name = b.Name + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Type = b.Type + x.xxx_hidden_Format = b.Format + x.xxx_hidden_Required = b.Required + return m0 +} + +// `Header` is a representation of OpenAPI v2 specification's Header object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#headerObject +type Header struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + xxx_hidden_Format string `protobuf:"bytes,3,opt,name=format,proto3" json:"format,omitempty"` + xxx_hidden_Default string `protobuf:"bytes,6,opt,name=default,proto3" json:"default,omitempty"` + xxx_hidden_Pattern string `protobuf:"bytes,13,opt,name=pattern,proto3" json:"pattern,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Header) Reset() { + *x = Header{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Header) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Header) ProtoMessage() {} + +func (x *Header) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Header) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *Header) GetType() string { + if x != nil { + return x.xxx_hidden_Type + } + return "" +} + +func (x *Header) GetFormat() string { + if x != nil { + return x.xxx_hidden_Format + } + return "" +} + +func (x *Header) GetDefault() string { + if x != nil { + return x.xxx_hidden_Default + } + return "" +} + +func (x *Header) GetPattern()
string { + if x != nil { + return x.xxx_hidden_Pattern + } + return "" +} + +func (x *Header) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *Header) SetType(v string) { + x.xxx_hidden_Type = v +} + +func (x *Header) SetFormat(v string) { + x.xxx_hidden_Format = v +} + +func (x *Header) SetDefault(v string) { + x.xxx_hidden_Default = v +} + +func (x *Header) SetPattern(v string) { + x.xxx_hidden_Pattern = v +} + +type Header_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `Description` is a short description of the header. + Description string + // The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported. + Type string + // `Format` is the extending format for the previously mentioned type. + Format string + // `Default` declares the value of the header that the server will use if none is provided. + // See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2. + // Unlike JSON Schema this value MUST conform to the defined type for the header. + Default string + // `Pattern` See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3. + Pattern string +} + +func (b0 Header_builder) Build() *Header { + m0 := &Header{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Type = b.Type + x.xxx_hidden_Format = b.Format + x.xxx_hidden_Default = b.Default + x.xxx_hidden_Pattern = b.Pattern + return m0 +} + +// `Response` is a representation of OpenAPI v2 specification's Response object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject +type Response struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_Schema *Schema `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"` + xxx_hidden_Headers map[string]*Header `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + xxx_hidden_Examples map[string]string `protobuf:"bytes,4,rep,name=examples,proto3" json:"examples,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,5,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Response) Reset() { + *x = Response{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Response) ProtoMessage() {} + +func (x *Response) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Response) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *Response) GetSchema() *Schema { + if x != nil { + return x.xxx_hidden_Schema + } + return nil +} + +func (x
*Response) GetHeaders() map[string]*Header { + if x != nil { + return x.xxx_hidden_Headers + } + return nil +} + +func (x *Response) GetExamples() map[string]string { + if x != nil { + return x.xxx_hidden_Examples + } + return nil +} + +func (x *Response) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.xxx_hidden_Extensions + } + return nil +} + +func (x *Response) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *Response) SetSchema(v *Schema) { + x.xxx_hidden_Schema = v +} + +func (x *Response) SetHeaders(v map[string]*Header) { + x.xxx_hidden_Headers = v +} + +func (x *Response) SetExamples(v map[string]string) { + x.xxx_hidden_Examples = v +} + +func (x *Response) SetExtensions(v map[string]*structpb.Value) { + x.xxx_hidden_Extensions = v +} + +func (x *Response) HasSchema() bool { + if x == nil { + return false + } + return x.xxx_hidden_Schema != nil +} + +func (x *Response) ClearSchema() { + x.xxx_hidden_Schema = nil +} + +type Response_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `Description` is a short description of the response. + // GFM syntax can be used for rich text representation. + Description string + // `Schema` optionally defines the structure of the response. + // If `Schema` is not provided, it means there is no content to the response. + Schema *Schema + // `Headers` A list of headers that are sent with the response. + // `Header` name is expected to be a string in the canonical format of the MIME header key + // See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey + Headers map[string]*Header + // `Examples` gives per-mimetype response examples. + // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object + Examples map[string]string + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 Response_builder) Build() *Response { + m0 := &Response{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Schema = b.Schema + x.xxx_hidden_Headers = b.Headers + x.xxx_hidden_Examples = b.Examples + x.xxx_hidden_Extensions = b.Extensions + return m0 +} + +// `Info` is a representation of OpenAPI v2 specification's Info object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// title: "Echo API"; +// version: "1.0"; +// description: ""; +// contact: { +// name: "gRPC-Gateway project"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// email: "none@example.com"; +// }; +// license: { +// name: "BSD 3-Clause License"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE"; +// }; +// }; +// ... 
+// }; +type Info struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` + xxx_hidden_Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_TermsOfService string `protobuf:"bytes,3,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"` + xxx_hidden_Contact *Contact `protobuf:"bytes,4,opt,name=contact,proto3" json:"contact,omitempty"` + xxx_hidden_License *License `protobuf:"bytes,5,opt,name=license,proto3" json:"license,omitempty"` + xxx_hidden_Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"` + xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Info) Reset() { + *x = Info{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Info) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Info) ProtoMessage() {} + +func (x *Info) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Info) GetTitle() string { + if x != nil { + return x.xxx_hidden_Title + } + return "" +} + +func (x *Info) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *Info) GetTermsOfService() string { + if x != nil { + return x.xxx_hidden_TermsOfService + } + return "" +} + +func (x *Info) GetContact() *Contact { + if x != nil { + return x.xxx_hidden_Contact + } + return nil +} + +func (x *Info) GetLicense() *License { + if x != nil { + return x.xxx_hidden_License + } + return nil +} + +func (x *Info) GetVersion() string { + if x != nil { + return x.xxx_hidden_Version + } + return "" +} + +func (x *Info) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.xxx_hidden_Extensions + } + return nil +} + +func (x *Info) SetTitle(v string) { + x.xxx_hidden_Title = v +} + +func (x *Info) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *Info) SetTermsOfService(v string) { + x.xxx_hidden_TermsOfService = v +} + +func (x *Info) SetContact(v *Contact) { + x.xxx_hidden_Contact = v +} + +func (x *Info) SetLicense(v *License) { + x.xxx_hidden_License = v +} + +func (x *Info) SetVersion(v string) { + x.xxx_hidden_Version = v +} + +func (x *Info) SetExtensions(v map[string]*structpb.Value) { + x.xxx_hidden_Extensions = v +} + +func (x *Info) HasContact() bool { + if x == nil { + return false + } + return x.xxx_hidden_Contact != nil +} + +func (x *Info) HasLicense() bool { + if x == nil { + return false + } + return x.xxx_hidden_License != nil +} + +func (x *Info) ClearContact() { + x.xxx_hidden_Contact = nil +} + +func (x *Info) ClearLicense() { + x.xxx_hidden_License = nil +} + +type Info_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The title of the application. + Title string + // A short description of the application. 
GFM syntax can be used for rich + // text representation. + Description string + // The Terms of Service for the API. + TermsOfService string + // The contact information for the exposed API. + Contact *Contact + // The license information for the exposed API. + License *License + // Provides the version of the application API (not to be confused + // with the specification version). + Version string + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 Info_builder) Build() *Info { + m0 := &Info{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Title = b.Title + x.xxx_hidden_Description = b.Description + x.xxx_hidden_TermsOfService = b.TermsOfService + x.xxx_hidden_Contact = b.Contact + x.xxx_hidden_License = b.License + x.xxx_hidden_Version = b.Version + x.xxx_hidden_Extensions = b.Extensions + return m0 +} + +// `Contact` is a representation of OpenAPI v2 specification's Contact object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// ... +// contact: { +// name: "gRPC-Gateway project"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// email: "none@example.com"; +// }; +// ... +// }; +// ... +// }; +type Contact struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + xxx_hidden_Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + xxx_hidden_Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Contact) Reset() { + *x = Contact{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Contact) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Contact) ProtoMessage() {} + +func (x *Contact) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Contact) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *Contact) GetUrl() string { + if x != nil { + return x.xxx_hidden_Url + } + return "" +} + +func (x *Contact) GetEmail() string { + if x != nil { + return x.xxx_hidden_Email + } + return "" +} + +func (x *Contact) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *Contact) SetUrl(v string) { + x.xxx_hidden_Url = v +} + +func (x *Contact) SetEmail(v string) { + x.xxx_hidden_Email = v +} + +type Contact_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The identifying name of the contact person/organization. + Name string + // The URL pointing to the contact information. MUST be in the format of a + // URL. + Url string + // The email address of the contact person/organization. MUST be in the format + // of an email address. 
+ Email string +} + +func (b0 Contact_builder) Build() *Contact { + m0 := &Contact{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Name = b.Name + x.xxx_hidden_Url = b.Url + x.xxx_hidden_Email = b.Email + return m0 +} + +// `License` is a representation of OpenAPI v2 specification's License object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// ... +// license: { +// name: "BSD 3-Clause License"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE"; +// }; +// ... +// }; +// ... +// }; +type License struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + xxx_hidden_Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *License) Reset() { + *x = License{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *License) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*License) ProtoMessage() {} + +func (x *License) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *License) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *License) GetUrl() string { + if x != nil { + return x.xxx_hidden_Url + } + return "" +} + +func (x *License) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *License) SetUrl(v string) { + x.xxx_hidden_Url = v +} + +type License_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The license name used for the API. + Name string + // A URL to the license used for the API. MUST be in the format of a URL. + Url string +} + +func (b0 License_builder) Build() *License { + m0 := &License{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Name = b.Name + x.xxx_hidden_Url = b.Url + return m0 +} + +// `ExternalDocumentation` is a representation of OpenAPI v2 specification's +// ExternalDocumentation object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// ... +// external_docs: { +// description: "More about gRPC-Gateway"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// } +// ... 
+// }; +type ExternalDocumentation struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ExternalDocumentation) Reset() { + *x = ExternalDocumentation{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ExternalDocumentation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExternalDocumentation) ProtoMessage() {} + +func (x *ExternalDocumentation) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ExternalDocumentation) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *ExternalDocumentation) GetUrl() string { + if x != nil { + return x.xxx_hidden_Url + } + return "" +} + +func (x *ExternalDocumentation) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *ExternalDocumentation) SetUrl(v string) { + x.xxx_hidden_Url = v +} + +type ExternalDocumentation_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // A short description of the target documentation. GFM syntax can be used for + // rich text representation. + Description string + // The URL for the target documentation. Value MUST be in the format + // of a URL. + Url string +} + +func (b0 ExternalDocumentation_builder) Build() *ExternalDocumentation { + m0 := &ExternalDocumentation{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Url = b.Url + return m0 +} + +// `Schema` is a representation of OpenAPI v2 specification's Schema object. 
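Continuing the sketch above (same assumed import), the "Echo API" example from the Info comment block maps onto the generated builders roughly like this, inside any function body:

info := options.Info_builder{
	Title:   "Echo API",
	Version: "1.0",
	Contact: options.Contact_builder{
		Name:  "gRPC-Gateway project",
		Url:   "https://github.com/grpc-ecosystem/grpc-gateway",
		Email: "none@example.com",
	}.Build(),
	License: options.License_builder{
		Name: "BSD 3-Clause License",
		Url:  "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE",
	}.Build(),
}.Build()
_ = info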
+// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +type Schema struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_JsonSchema *JSONSchema `protobuf:"bytes,1,opt,name=json_schema,json=jsonSchema,proto3" json:"json_schema,omitempty"` + xxx_hidden_Discriminator string `protobuf:"bytes,2,opt,name=discriminator,proto3" json:"discriminator,omitempty"` + xxx_hidden_ReadOnly bool `protobuf:"varint,3,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + xxx_hidden_ExternalDocs *ExternalDocumentation `protobuf:"bytes,5,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + xxx_hidden_Example string `protobuf:"bytes,6,opt,name=example,proto3" json:"example,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Schema) Reset() { + *x = Schema{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Schema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema) ProtoMessage() {} + +func (x *Schema) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Schema) GetJsonSchema() *JSONSchema { + if x != nil { + return x.xxx_hidden_JsonSchema + } + return nil +} + +func (x *Schema) GetDiscriminator() string { + if x != nil { + return x.xxx_hidden_Discriminator + } + return "" +} + +func (x *Schema) GetReadOnly() bool { + if x != nil { + return x.xxx_hidden_ReadOnly + } + return false +} + +func (x *Schema) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.xxx_hidden_ExternalDocs + } + return nil +} + +func (x *Schema) GetExample() string { + if x != nil { + return x.xxx_hidden_Example + } + return "" +} + +func (x *Schema) SetJsonSchema(v *JSONSchema) { + x.xxx_hidden_JsonSchema = v +} + +func (x *Schema) SetDiscriminator(v string) { + x.xxx_hidden_Discriminator = v +} + +func (x *Schema) SetReadOnly(v bool) { + x.xxx_hidden_ReadOnly = v +} + +func (x *Schema) SetExternalDocs(v *ExternalDocumentation) { + x.xxx_hidden_ExternalDocs = v +} + +func (x *Schema) SetExample(v string) { + x.xxx_hidden_Example = v +} + +func (x *Schema) HasJsonSchema() bool { + if x == nil { + return false + } + return x.xxx_hidden_JsonSchema != nil +} + +func (x *Schema) HasExternalDocs() bool { + if x == nil { + return false + } + return x.xxx_hidden_ExternalDocs != nil +} + +func (x *Schema) ClearJsonSchema() { + x.xxx_hidden_JsonSchema = nil +} + +func (x *Schema) ClearExternalDocs() { + x.xxx_hidden_ExternalDocs = nil +} + +type Schema_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + JsonSchema *JSONSchema + // Adds support for polymorphism. The discriminator is the schema property + // name that is used to differentiate between other schema that inherit this + // schema. The property name used MUST be defined at this schema and it MUST + // be in the required property list. When used, the value MUST be the name of + // this schema or any schema that inherits it. + Discriminator string + // Relevant only for Schema "properties" definitions. Declares the property as + // "read only". 
This means that it MAY be sent as part of a response but MUST + // NOT be sent as part of the request. Properties marked as readOnly being + // true SHOULD NOT be in the required list of the defined schema. Default + // value is false. + ReadOnly bool + // Additional external documentation for this schema. + ExternalDocs *ExternalDocumentation + // A free-form property to include an example of an instance for this schema in JSON. + // This is copied verbatim to the output. + Example string +} + +func (b0 Schema_builder) Build() *Schema { + m0 := &Schema{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_JsonSchema = b.JsonSchema + x.xxx_hidden_Discriminator = b.Discriminator + x.xxx_hidden_ReadOnly = b.ReadOnly + x.xxx_hidden_ExternalDocs = b.ExternalDocs + x.xxx_hidden_Example = b.Example + return m0 +} + +// `EnumSchema` is a subset of fields from the OpenAPI v2 specification's Schema object. +// Only fields that are applicable to Enums are included. +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum) = { +// ... +// title: "MyEnum"; +// description: "This is my nice enum"; +// example: "ZERO"; +// required: true; +// ... +// }; +type EnumSchema struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_Default string `protobuf:"bytes,2,opt,name=default,proto3" json:"default,omitempty"` + xxx_hidden_Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"` + xxx_hidden_Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` + xxx_hidden_ReadOnly bool `protobuf:"varint,5,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + xxx_hidden_ExternalDocs *ExternalDocumentation `protobuf:"bytes,6,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + xxx_hidden_Example string `protobuf:"bytes,7,opt,name=example,proto3" json:"example,omitempty"` + xxx_hidden_Ref string `protobuf:"bytes,8,opt,name=ref,proto3" json:"ref,omitempty"` + xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EnumSchema) Reset() { + *x = EnumSchema{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EnumSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumSchema) ProtoMessage() {} + +func (x *EnumSchema) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *EnumSchema) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *EnumSchema) GetDefault() string { + if x != nil { + return x.xxx_hidden_Default + } + return "" +} + +func (x *EnumSchema) GetTitle() string { + if x != nil { + return x.xxx_hidden_Title + } + return "" +} + +func (x *EnumSchema) GetRequired() bool { + if x != nil { + return
x.xxx_hidden_Required + } + return false +} + +func (x *EnumSchema) GetReadOnly() bool { + if x != nil { + return x.xxx_hidden_ReadOnly + } + return false +} + +func (x *EnumSchema) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.xxx_hidden_ExternalDocs + } + return nil +} + +func (x *EnumSchema) GetExample() string { + if x != nil { + return x.xxx_hidden_Example + } + return "" +} + +func (x *EnumSchema) GetRef() string { + if x != nil { + return x.xxx_hidden_Ref + } + return "" +} + +func (x *EnumSchema) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.xxx_hidden_Extensions + } + return nil +} + +func (x *EnumSchema) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *EnumSchema) SetDefault(v string) { + x.xxx_hidden_Default = v +} + +func (x *EnumSchema) SetTitle(v string) { + x.xxx_hidden_Title = v +} + +func (x *EnumSchema) SetRequired(v bool) { + x.xxx_hidden_Required = v +} + +func (x *EnumSchema) SetReadOnly(v bool) { + x.xxx_hidden_ReadOnly = v +} + +func (x *EnumSchema) SetExternalDocs(v *ExternalDocumentation) { + x.xxx_hidden_ExternalDocs = v +} + +func (x *EnumSchema) SetExample(v string) { + x.xxx_hidden_Example = v +} + +func (x *EnumSchema) SetRef(v string) { + x.xxx_hidden_Ref = v +} + +func (x *EnumSchema) SetExtensions(v map[string]*structpb.Value) { + x.xxx_hidden_Extensions = v +} + +func (x *EnumSchema) HasExternalDocs() bool { + if x == nil { + return false + } + return x.xxx_hidden_ExternalDocs != nil +} + +func (x *EnumSchema) ClearExternalDocs() { + x.xxx_hidden_ExternalDocs = nil +} + +type EnumSchema_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // A short description of the schema. + Description string + Default string + // The title of the schema. + Title string + Required bool + ReadOnly bool + // Additional external documentation for this schema. + ExternalDocs *ExternalDocumentation + Example string + // Ref is used to define an external reference to include in the message. + // This could be a fully qualified proto message reference, and that type must + // be imported into the protofile. If no message is identified, the Ref will + // be used verbatim in the output. + // For example: + // + // `ref: ".google.protobuf.Timestamp"`. + Ref string + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 EnumSchema_builder) Build() *EnumSchema { + m0 := &EnumSchema{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Default = b.Default + x.xxx_hidden_Title = b.Title + x.xxx_hidden_Required = b.Required + x.xxx_hidden_ReadOnly = b.ReadOnly + x.xxx_hidden_ExternalDocs = b.ExternalDocs + x.xxx_hidden_Example = b.Example + x.xxx_hidden_Ref = b.Ref + x.xxx_hidden_Extensions = b.Extensions + return m0 +} + +// `JSONSchema` represents properties from JSON Schema taken, and as used, in +// the OpenAPI v2 spec. +// +// This includes changes made by OpenAPI v2. 
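The openapiv2_enum example in the comment above translates to the builder API as follows (a sketch, same assumed import as the earlier sketches):

enumSchema := options.EnumSchema_builder{
	Title:       "MyEnum",
	Description: "This is my nice enum",
	Example:     "ZERO",
	Required:    true,
}.Build()
_ = enumSchema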
+// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +// +// See also: https://cswr.github.io/JsonSchema/spec/basic_types/, +// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json +// +// Example: +// +// message SimpleMessage { +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = { +// json_schema: { +// title: "SimpleMessage" +// description: "A simple message." +// required: ["id"] +// } +// }; +// +// // Id represents the message identifier. +// string id = 1; [ +// (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = { +// description: "The unique identifier of the simple message." +// }]; +// } +type JSONSchema struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Ref string `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"` + xxx_hidden_Title string `protobuf:"bytes,5,opt,name=title,proto3" json:"title,omitempty"` + xxx_hidden_Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_Default string `protobuf:"bytes,7,opt,name=default,proto3" json:"default,omitempty"` + xxx_hidden_ReadOnly bool `protobuf:"varint,8,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + xxx_hidden_Example string `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"` + xxx_hidden_MultipleOf float64 `protobuf:"fixed64,10,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + xxx_hidden_Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` + xxx_hidden_ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + xxx_hidden_Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` + xxx_hidden_ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + xxx_hidden_MaxLength uint64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + xxx_hidden_MinLength uint64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + xxx_hidden_Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` + xxx_hidden_MaxItems uint64 `protobuf:"varint,20,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + xxx_hidden_MinItems uint64 `protobuf:"varint,21,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + xxx_hidden_UniqueItems bool `protobuf:"varint,22,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + xxx_hidden_MaxProperties uint64 `protobuf:"varint,24,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"` + xxx_hidden_MinProperties uint64 `protobuf:"varint,25,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"` + xxx_hidden_Required []string `protobuf:"bytes,26,rep,name=required,proto3" json:"required,omitempty"` + xxx_hidden_Array []string `protobuf:"bytes,34,rep,name=array,proto3" json:"array,omitempty"` + xxx_hidden_Type []JSONSchema_JSONSchemaSimpleTypes `protobuf:"varint,35,rep,packed,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.JSONSchema_JSONSchemaSimpleTypes" json:"type,omitempty"` + xxx_hidden_Format string `protobuf:"bytes,36,opt,name=format,proto3" json:"format,omitempty"` + xxx_hidden_Enum []string 
`protobuf:"bytes,46,rep,name=enum,proto3" json:"enum,omitempty"` + xxx_hidden_FieldConfiguration *JSONSchema_FieldConfiguration `protobuf:"bytes,1001,opt,name=field_configuration,json=fieldConfiguration,proto3" json:"field_configuration,omitempty"` + xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,48,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *JSONSchema) Reset() { + *x = JSONSchema{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *JSONSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JSONSchema) ProtoMessage() {} + +func (x *JSONSchema) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *JSONSchema) GetRef() string { + if x != nil { + return x.xxx_hidden_Ref + } + return "" +} + +func (x *JSONSchema) GetTitle() string { + if x != nil { + return x.xxx_hidden_Title + } + return "" +} + +func (x *JSONSchema) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *JSONSchema) GetDefault() string { + if x != nil { + return x.xxx_hidden_Default + } + return "" +} + +func (x *JSONSchema) GetReadOnly() bool { + if x != nil { + return x.xxx_hidden_ReadOnly + } + return false +} + +func (x *JSONSchema) GetExample() string { + if x != nil { + return x.xxx_hidden_Example + } + return "" +} + +func (x *JSONSchema) GetMultipleOf() float64 { + if x != nil { + return x.xxx_hidden_MultipleOf + } + return 0 +} + +func (x *JSONSchema) GetMaximum() float64 { + if x != nil { + return x.xxx_hidden_Maximum + } + return 0 +} + +func (x *JSONSchema) GetExclusiveMaximum() bool { + if x != nil { + return x.xxx_hidden_ExclusiveMaximum + } + return false +} + +func (x *JSONSchema) GetMinimum() float64 { + if x != nil { + return x.xxx_hidden_Minimum + } + return 0 +} + +func (x *JSONSchema) GetExclusiveMinimum() bool { + if x != nil { + return x.xxx_hidden_ExclusiveMinimum + } + return false +} + +func (x *JSONSchema) GetMaxLength() uint64 { + if x != nil { + return x.xxx_hidden_MaxLength + } + return 0 +} + +func (x *JSONSchema) GetMinLength() uint64 { + if x != nil { + return x.xxx_hidden_MinLength + } + return 0 +} + +func (x *JSONSchema) GetPattern() string { + if x != nil { + return x.xxx_hidden_Pattern + } + return "" +} + +func (x *JSONSchema) GetMaxItems() uint64 { + if x != nil { + return x.xxx_hidden_MaxItems + } + return 0 +} + +func (x *JSONSchema) GetMinItems() uint64 { + if x != nil { + return x.xxx_hidden_MinItems + } + return 0 +} + +func (x *JSONSchema) GetUniqueItems() bool { + if x != nil { + return x.xxx_hidden_UniqueItems + } + return false +} + +func (x *JSONSchema) GetMaxProperties() uint64 { + if x != nil { + return x.xxx_hidden_MaxProperties + } + return 0 +} + +func (x *JSONSchema) GetMinProperties() uint64 { + if x != nil { + return x.xxx_hidden_MinProperties + } + return 0 +} + +func (x *JSONSchema) GetRequired() []string { + if x != nil { + return x.xxx_hidden_Required + } + return nil +} + +func (x *JSONSchema) GetArray() []string { + if x != nil { + 
return x.xxx_hidden_Array + } + return nil +} + +func (x *JSONSchema) GetType() []JSONSchema_JSONSchemaSimpleTypes { + if x != nil { + return x.xxx_hidden_Type + } + return nil +} + +func (x *JSONSchema) GetFormat() string { + if x != nil { + return x.xxx_hidden_Format + } + return "" +} + +func (x *JSONSchema) GetEnum() []string { + if x != nil { + return x.xxx_hidden_Enum + } + return nil +} + +func (x *JSONSchema) GetFieldConfiguration() *JSONSchema_FieldConfiguration { + if x != nil { + return x.xxx_hidden_FieldConfiguration + } + return nil +} + +func (x *JSONSchema) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.xxx_hidden_Extensions + } + return nil +} + +func (x *JSONSchema) SetRef(v string) { + x.xxx_hidden_Ref = v +} + +func (x *JSONSchema) SetTitle(v string) { + x.xxx_hidden_Title = v +} + +func (x *JSONSchema) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *JSONSchema) SetDefault(v string) { + x.xxx_hidden_Default = v +} + +func (x *JSONSchema) SetReadOnly(v bool) { + x.xxx_hidden_ReadOnly = v +} + +func (x *JSONSchema) SetExample(v string) { + x.xxx_hidden_Example = v +} + +func (x *JSONSchema) SetMultipleOf(v float64) { + x.xxx_hidden_MultipleOf = v +} + +func (x *JSONSchema) SetMaximum(v float64) { + x.xxx_hidden_Maximum = v +} + +func (x *JSONSchema) SetExclusiveMaximum(v bool) { + x.xxx_hidden_ExclusiveMaximum = v +} + +func (x *JSONSchema) SetMinimum(v float64) { + x.xxx_hidden_Minimum = v +} + +func (x *JSONSchema) SetExclusiveMinimum(v bool) { + x.xxx_hidden_ExclusiveMinimum = v +} + +func (x *JSONSchema) SetMaxLength(v uint64) { + x.xxx_hidden_MaxLength = v +} + +func (x *JSONSchema) SetMinLength(v uint64) { + x.xxx_hidden_MinLength = v +} + +func (x *JSONSchema) SetPattern(v string) { + x.xxx_hidden_Pattern = v +} + +func (x *JSONSchema) SetMaxItems(v uint64) { + x.xxx_hidden_MaxItems = v +} + +func (x *JSONSchema) SetMinItems(v uint64) { + x.xxx_hidden_MinItems = v +} + +func (x *JSONSchema) SetUniqueItems(v bool) { + x.xxx_hidden_UniqueItems = v +} + +func (x *JSONSchema) SetMaxProperties(v uint64) { + x.xxx_hidden_MaxProperties = v +} + +func (x *JSONSchema) SetMinProperties(v uint64) { + x.xxx_hidden_MinProperties = v +} + +func (x *JSONSchema) SetRequired(v []string) { + x.xxx_hidden_Required = v +} + +func (x *JSONSchema) SetArray(v []string) { + x.xxx_hidden_Array = v +} + +func (x *JSONSchema) SetType(v []JSONSchema_JSONSchemaSimpleTypes) { + x.xxx_hidden_Type = v +} + +func (x *JSONSchema) SetFormat(v string) { + x.xxx_hidden_Format = v +} + +func (x *JSONSchema) SetEnum(v []string) { + x.xxx_hidden_Enum = v +} + +func (x *JSONSchema) SetFieldConfiguration(v *JSONSchema_FieldConfiguration) { + x.xxx_hidden_FieldConfiguration = v +} + +func (x *JSONSchema) SetExtensions(v map[string]*structpb.Value) { + x.xxx_hidden_Extensions = v +} + +func (x *JSONSchema) HasFieldConfiguration() bool { + if x == nil { + return false + } + return x.xxx_hidden_FieldConfiguration != nil +} + +func (x *JSONSchema) ClearFieldConfiguration() { + x.xxx_hidden_FieldConfiguration = nil +} + +type JSONSchema_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Ref is used to define an external reference to include in the message. + // This could be a fully qualified proto message reference, and that type must + // be imported into the protofile. If no message is identified, the Ref will + // be used verbatim in the output. 
+ // For example: + // + // `ref: ".google.protobuf.Timestamp"`. + Ref string + // The title of the schema. + Title string + // A short description of the schema. + Description string + Default string + ReadOnly bool + // A free-form property to include a JSON example of this field. This is copied + // verbatim to the output swagger.json. Quotes must be escaped. + // This property is the same for 2.0 and 3.0.0 https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject + Example string + MultipleOf float64 + // Maximum represents an inclusive upper limit for a numeric instance. The + // value MUST be a number. + Maximum float64 + ExclusiveMaximum bool + // Minimum represents an inclusive lower limit for a numeric instance. The + // value MUST be a number. + Minimum float64 + ExclusiveMinimum bool + MaxLength uint64 + MinLength uint64 + Pattern string + MaxItems uint64 + MinItems uint64 + UniqueItems bool + MaxProperties uint64 + MinProperties uint64 + Required []string + // Items in 'array' must be unique. + Array []string + Type []JSONSchema_JSONSchemaSimpleTypes + // `Format` + Format string + // Items in `enum` must be unique https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1 + Enum []string + // Additional field level properties used when generating the OpenAPI v2 file. + FieldConfiguration *JSONSchema_FieldConfiguration + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 JSONSchema_builder) Build() *JSONSchema { + m0 := &JSONSchema{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Ref = b.Ref + x.xxx_hidden_Title = b.Title + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Default = b.Default + x.xxx_hidden_ReadOnly = b.ReadOnly + x.xxx_hidden_Example = b.Example + x.xxx_hidden_MultipleOf = b.MultipleOf + x.xxx_hidden_Maximum = b.Maximum + x.xxx_hidden_ExclusiveMaximum = b.ExclusiveMaximum + x.xxx_hidden_Minimum = b.Minimum + x.xxx_hidden_ExclusiveMinimum = b.ExclusiveMinimum + x.xxx_hidden_MaxLength = b.MaxLength + x.xxx_hidden_MinLength = b.MinLength + x.xxx_hidden_Pattern = b.Pattern + x.xxx_hidden_MaxItems = b.MaxItems + x.xxx_hidden_MinItems = b.MinItems + x.xxx_hidden_UniqueItems = b.UniqueItems + x.xxx_hidden_MaxProperties = b.MaxProperties + x.xxx_hidden_MinProperties = b.MinProperties + x.xxx_hidden_Required = b.Required + x.xxx_hidden_Array = b.Array + x.xxx_hidden_Type = b.Type + x.xxx_hidden_Format = b.Format + x.xxx_hidden_Enum = b.Enum + x.xxx_hidden_FieldConfiguration = b.FieldConfiguration + x.xxx_hidden_Extensions = b.Extensions + return m0 +} +
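The SimpleMessage example above, expressed against JSONSchema_builder and wrapped in a Schema (a sketch under the same import assumption; field names are taken from the builders shown here):

js := options.JSONSchema_builder{
	Title:       "SimpleMessage",
	Description: "A simple message.",
	Required:    []string{"id"},
}.Build()
schema := options.Schema_builder{JsonSchema: js}.Build()
_ = schema

+// `Tag` is a representation of OpenAPI v2 specification's Tag object.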
+// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject +type Tag struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + xxx_hidden_Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_ExternalDocs *ExternalDocumentation `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,4,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Tag) Reset() { + *x = Tag{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Tag) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Tag) ProtoMessage() {} + +func (x *Tag) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Tag) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *Tag) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *Tag) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.xxx_hidden_ExternalDocs + } + return nil +} + +func (x *Tag) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.xxx_hidden_Extensions + } + return nil +} + +func (x *Tag) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *Tag) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *Tag) SetExternalDocs(v *ExternalDocumentation) { + x.xxx_hidden_ExternalDocs = v +} + +func (x *Tag) SetExtensions(v map[string]*structpb.Value) { + x.xxx_hidden_Extensions = v +} + +func (x *Tag) HasExternalDocs() bool { + if x == nil { + return false + } + return x.xxx_hidden_ExternalDocs != nil +} + +func (x *Tag) ClearExternalDocs() { + x.xxx_hidden_ExternalDocs = nil +} + +type Tag_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The name of the tag. Use it to allow override of the name of a + // global Tag object, then use that name to reference the tag throughout the + // OpenAPI file. + Name string + // A short description for the tag. GFM syntax can be used for rich text + // representation. + Description string + // Additional external documentation for this tag. + ExternalDocs *ExternalDocumentation + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. 
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 Tag_builder) Build() *Tag { + m0 := &Tag{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Name = b.Name + x.xxx_hidden_Description = b.Description + x.xxx_hidden_ExternalDocs = b.ExternalDocs + x.xxx_hidden_Extensions = b.Extensions + return m0 +} + +// `SecurityDefinitions` is a representation of OpenAPI v2 specification's +// Security Definitions object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject +// +// A declaration of the security schemes available to be used in the +// specification. This does not enforce the security schemes on the operations +// and only serves to provide the relevant details for each scheme. +type SecurityDefinitions struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Security map[string]*SecurityScheme `protobuf:"bytes,1,rep,name=security,proto3" json:"security,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecurityDefinitions) Reset() { + *x = SecurityDefinitions{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityDefinitions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityDefinitions) ProtoMessage() {} + +func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SecurityDefinitions) GetSecurity() map[string]*SecurityScheme { + if x != nil { + return x.xxx_hidden_Security + } + return nil +} + +func (x *SecurityDefinitions) SetSecurity(v map[string]*SecurityScheme) { + x.xxx_hidden_Security = v +} + +type SecurityDefinitions_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // A single security scheme definition, mapping a "name" to the scheme it + // defines. + Security map[string]*SecurityScheme +} + +func (b0 SecurityDefinitions_builder) Build() *SecurityDefinitions { + m0 := &SecurityDefinitions{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Security = b.Security + return m0 +} + +// `SecurityScheme` is a representation of OpenAPI v2 specification's +// Security Scheme object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securitySchemeObject +// +// Allows the definition of a security scheme that can be used by the +// operations. Supported schemes are basic authentication, an API key (either as +// a header or as a query parameter) and OAuth2's common flows (implicit, +// password, application and access code). 
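Tag and SecurityDefinitions follow the same builder pattern; a brief sketch with illustrative values (same assumed import):

tag := options.Tag_builder{
	Name:        "EchoService", // illustrative tag name
	Description: "Echo service handlers",
}.Build()
_ = tag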
+type SecurityScheme struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Type SecurityScheme_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Type" json:"type,omitempty"` + xxx_hidden_Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + xxx_hidden_In SecurityScheme_In `protobuf:"varint,4,opt,name=in,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_In" json:"in,omitempty"` + xxx_hidden_Flow SecurityScheme_Flow `protobuf:"varint,5,opt,name=flow,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Flow" json:"flow,omitempty"` + xxx_hidden_AuthorizationUrl string `protobuf:"bytes,6,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` + xxx_hidden_TokenUrl string `protobuf:"bytes,7,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` + xxx_hidden_Scopes *Scopes `protobuf:"bytes,8,opt,name=scopes,proto3" json:"scopes,omitempty"` + xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecurityScheme) Reset() { + *x = SecurityScheme{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityScheme) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityScheme) ProtoMessage() {} + +func (x *SecurityScheme) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SecurityScheme) GetType() SecurityScheme_Type { + if x != nil { + return x.xxx_hidden_Type + } + return SecurityScheme_TYPE_INVALID +} + +func (x *SecurityScheme) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *SecurityScheme) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *SecurityScheme) GetIn() SecurityScheme_In { + if x != nil { + return x.xxx_hidden_In + } + return SecurityScheme_IN_INVALID +} + +func (x *SecurityScheme) GetFlow() SecurityScheme_Flow { + if x != nil { + return x.xxx_hidden_Flow + } + return SecurityScheme_FLOW_INVALID +} + +func (x *SecurityScheme) GetAuthorizationUrl() string { + if x != nil { + return x.xxx_hidden_AuthorizationUrl + } + return "" +} + +func (x *SecurityScheme) GetTokenUrl() string { + if x != nil { + return x.xxx_hidden_TokenUrl + } + return "" +} + +func (x *SecurityScheme) GetScopes() *Scopes { + if x != nil { + return x.xxx_hidden_Scopes + } + return nil +} + +func (x *SecurityScheme) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.xxx_hidden_Extensions + } + return nil +} + +func (x *SecurityScheme) SetType(v SecurityScheme_Type) { + x.xxx_hidden_Type = v +} + +func (x *SecurityScheme) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *SecurityScheme) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *SecurityScheme) SetIn(v 
SecurityScheme_In) { + x.xxx_hidden_In = v +} + +func (x *SecurityScheme) SetFlow(v SecurityScheme_Flow) { + x.xxx_hidden_Flow = v +} + +func (x *SecurityScheme) SetAuthorizationUrl(v string) { + x.xxx_hidden_AuthorizationUrl = v +} + +func (x *SecurityScheme) SetTokenUrl(v string) { + x.xxx_hidden_TokenUrl = v +} + +func (x *SecurityScheme) SetScopes(v *Scopes) { + x.xxx_hidden_Scopes = v +} + +func (x *SecurityScheme) SetExtensions(v map[string]*structpb.Value) { + x.xxx_hidden_Extensions = v +} + +func (x *SecurityScheme) HasScopes() bool { + if x == nil { + return false + } + return x.xxx_hidden_Scopes != nil +} + +func (x *SecurityScheme) ClearScopes() { + x.xxx_hidden_Scopes = nil +} + +type SecurityScheme_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The type of the security scheme. Valid values are "basic", + // "apiKey" or "oauth2". + Type SecurityScheme_Type + // A short description for security scheme. + Description string + // The name of the header or query parameter to be used. + // Valid for apiKey. + Name string + // The location of the API key. Valid values are "query" or + // "header". + // Valid for apiKey. + In SecurityScheme_In + // The flow used by the OAuth2 security scheme. Valid values are + // "implicit", "password", "application" or "accessCode". + // Valid for oauth2. + Flow SecurityScheme_Flow + // The authorization URL to be used for this flow. This SHOULD be in + // the form of a URL. + // Valid for oauth2/implicit and oauth2/accessCode. + AuthorizationUrl string + // The token URL to be used for this flow. This SHOULD be in the + // form of a URL. + // Valid for oauth2/password, oauth2/application and oauth2/accessCode. + TokenUrl string + // The available scopes for the OAuth2 security scheme. + // Valid for oauth2. + Scopes *Scopes + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 SecurityScheme_builder) Build() *SecurityScheme { + m0 := &SecurityScheme{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Type = b.Type + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Name = b.Name + x.xxx_hidden_In = b.In + x.xxx_hidden_Flow = b.Flow + x.xxx_hidden_AuthorizationUrl = b.AuthorizationUrl + x.xxx_hidden_TokenUrl = b.TokenUrl + x.xxx_hidden_Scopes = b.Scopes + x.xxx_hidden_Extensions = b.Extensions + return m0 +} + +// `SecurityRequirement` is a representation of OpenAPI v2 specification's +// Security Requirement object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityRequirementObject +// +// Lists the required security schemes to execute this operation. The object can +// have multiple security schemes declared in it which are all required (that +// is, there is a logical AND between the schemes). +// +// The name used for each property MUST correspond to a security scheme +// declared in the Security Definitions. 
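A sketch of declaring an API-key scheme and registering it under a name (same assumed import). SecurityScheme_TYPE_API_KEY and SecurityScheme_IN_HEADER follow the generated naming of the Type and In enums; only the *_INVALID zero values appear verbatim above, so treat these names as assumptions:

scheme := options.SecurityScheme_builder{
	Type: options.SecurityScheme_TYPE_API_KEY, // assumed enum value name
	In:   options.SecurityScheme_IN_HEADER,    // assumed enum value name
	Name: "X-API-Key",
}.Build()
defs := options.SecurityDefinitions_builder{
	Security: map[string]*options.SecurityScheme{"ApiKeyAuth": scheme},
}.Build()
_ = defs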
+type SecurityRequirement struct {
+	state protoimpl.MessageState `protogen:"opaque.v1"`
+	xxx_hidden_SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue `protobuf:"bytes,1,rep,name=security_requirement,json=securityRequirement,proto3" json:"security_requirement,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache protoimpl.SizeCache
+}
+
+func (x *SecurityRequirement) Reset() {
+	*x = SecurityRequirement{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityRequirement) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityRequirement) ProtoMessage() {}
+
+func (x *SecurityRequirement) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *SecurityRequirement) GetSecurityRequirement() map[string]*SecurityRequirement_SecurityRequirementValue {
+	if x != nil {
+		return x.xxx_hidden_SecurityRequirement
+	}
+	return nil
+}
+
+func (x *SecurityRequirement) SetSecurityRequirement(v map[string]*SecurityRequirement_SecurityRequirementValue) {
+	x.xxx_hidden_SecurityRequirement = v
+}
+
+type SecurityRequirement_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Each name must correspond to a security scheme which is declared in
+	// the Security Definitions. If the security scheme is of type "oauth2",
+	// then the value is a list of scope names required for the execution.
+	// For other security scheme types, the array MUST be empty.
+	SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue
+}
+
+func (b0 SecurityRequirement_builder) Build() *SecurityRequirement {
+	m0 := &SecurityRequirement{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_SecurityRequirement = b.SecurityRequirement
+	return m0
+}
+
+// `Scopes` is a representation of OpenAPI v2 specification's Scopes object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject
+//
+// Lists the available scopes for an OAuth2 security scheme.
+type Scopes struct {
+	state protoimpl.MessageState `protogen:"opaque.v1"`
+	xxx_hidden_Scope map[string]string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache protoimpl.SizeCache
+}
+
+func (x *Scopes) Reset() {
+	*x = Scopes{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Scopes) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Scopes) ProtoMessage() {}
+
+func (x *Scopes) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Scopes) GetScope() map[string]string {
+	if x != nil {
+		return x.xxx_hidden_Scope
+	}
+	return nil
+}
+
+func (x *Scopes) SetScope(v map[string]string) {
+	x.xxx_hidden_Scope = v
+}
+
+type Scopes_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Maps between a name of a scope to a short description of it (as the value
+	// of the property).
+	Scope map[string]string
+}
+
+func (b0 Scopes_builder) Build() *Scopes {
+	m0 := &Scopes{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Scope = b.Scope
+	return m0
+}
+
+// 'FieldConfiguration' provides additional field level properties used when generating the OpenAPI v2 file.
+// These properties are not defined by OpenAPIv2, but they are used to control the generation.
+type JSONSchema_FieldConfiguration struct {
+	state protoimpl.MessageState `protogen:"opaque.v1"`
+	xxx_hidden_PathParamName string `protobuf:"bytes,47,opt,name=path_param_name,json=pathParamName,proto3" json:"path_param_name,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache protoimpl.SizeCache
+}
+
+func (x *JSONSchema_FieldConfiguration) Reset() {
+	*x = JSONSchema_FieldConfiguration{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *JSONSchema_FieldConfiguration) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*JSONSchema_FieldConfiguration) ProtoMessage() {}
+
+func (x *JSONSchema_FieldConfiguration) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *JSONSchema_FieldConfiguration) GetPathParamName() string {
+	if x != nil {
+		return x.xxx_hidden_PathParamName
+	}
+	return ""
+}
+
+func (x *JSONSchema_FieldConfiguration) SetPathParamName(v string) {
+	x.xxx_hidden_PathParamName = v
+}
+
+type JSONSchema_FieldConfiguration_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Alternative parameter name when used as path parameter. If set, this will
+	// be used as the complete parameter name when this field is used as a path
+	// parameter. Use this to avoid having auto generated path parameter names
+	// for overlapping paths.
+	PathParamName string
+}
+
+func (b0 JSONSchema_FieldConfiguration_builder) Build() *JSONSchema_FieldConfiguration {
+	m0 := &JSONSchema_FieldConfiguration{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_PathParamName = b.PathParamName
+	return m0
+}
+
+// If the security scheme is of type "oauth2", then the value is a list of
+// scope names required for the execution. For other security scheme types,
+// the array MUST be empty.
+type SecurityRequirement_SecurityRequirementValue struct {
+	state protoimpl.MessageState `protogen:"opaque.v1"`
+	xxx_hidden_Scope []string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache protoimpl.SizeCache
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) Reset() {
+	*x = SecurityRequirement_SecurityRequirementValue{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityRequirement_SecurityRequirementValue) ProtoMessage() {}
+
+func (x *SecurityRequirement_SecurityRequirementValue) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) GetScope() []string {
+	if x != nil {
+		return x.xxx_hidden_Scope
+	}
+	return nil
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) SetScope(v []string) {
+	x.xxx_hidden_Scope = v
+}
+
+type SecurityRequirement_SecurityRequirementValue_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+ + Scope []string +} + +func (b0 SecurityRequirement_SecurityRequirementValue_builder) Build() *SecurityRequirement_SecurityRequirementValue { + m0 := &SecurityRequirement_SecurityRequirementValue{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Scope = b.Scope + return m0 +} + +var File_protoc_gen_openapiv2_options_openapiv2_proto protoreflect.FileDescriptor + +var file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = []byte{ + 0x0a, 0x2c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x29, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb3, 0x08, 0x0a, 0x07, 0x53, 0x77, 0x61, 0x67, + 0x67, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x12, 0x43, 0x0a, + 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, + 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x62, 0x61, 0x73, 0x65, 0x50, + 0x61, 0x74, 0x68, 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, + 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, + 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, + 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, 0x71, 0x0a, 
0x14, 0x73, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, + 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5a, 0x0a, 0x08, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x42, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, + 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x65, 0x0a, 0x0d, + 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, + 0x6f, 0x63, 0x73, 0x12, 0x62, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, + 
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xd6, 0x07, + 0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, + 0x63, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, + 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, + 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x61, 0x0a, + 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, + 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, + 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x12, 0x1e, 0x0a, + 0x0a, 0x64, 0x65, 0x70, 
0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x5a, 0x0a, + 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, + 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, + 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x64, 0x0a, 0x0a, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x55, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, + 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0x62, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x12, 0x54, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, + 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 
0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0xa3, 0x02, 0x0a, 0x0f, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x53, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x2e, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x45, 0x0a, + 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0a, + 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, + 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45, + 0x41, 0x4e, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, + 0x22, 0xd8, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x4a, 0x04, 0x08, + 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a, + 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0a, 0x10, + 0x0b, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, + 0x0e, 0x10, 0x0f, 0x4a, 0x04, 0x08, 0x0f, 0x10, 0x10, 0x4a, 0x04, 0x08, 0x10, 0x10, 0x11, 0x4a, + 0x04, 0x08, 0x11, 0x10, 0x12, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0x9a, 0x05, 0x0a, 0x08, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x5a, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x12, 0x5d, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, + 0x12, 0x63, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x6d, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x47, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 
0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd6, 0x03, 0x0a, 0x04, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x65, 0x72, + 0x6d, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x4c, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, + 0x74, 0x12, 0x4c, 0x0a, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4c, + 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x0a, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x45, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, + 0x72, 0x6c, 0x12, 0x14, 
0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x2f, 0x0a, 0x07, 0x4c, 0x69, 0x63, 0x65, + 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x4b, 0x0a, 0x15, 0x45, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0xaa, 0x02, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x56, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0a, 0x6a, + 0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x69, 0x73, + 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x64, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x12, + 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a, 0x0d, + 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, + 0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x4a, 0x04, 0x08, + 0x04, 0x10, 0x05, 0x22, 0xe8, 0x03, 0x0a, 0x0a, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, + 0x69, 0x74, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, + 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 
0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a, + 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x44, 0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x10, + 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, + 0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd7, + 0x0a, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x10, 0x0a, + 0x03, 0x72, 0x65, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12, + 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x18, + 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, + 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, + 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, + 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 
0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, + 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, + 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, + 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, + 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, + 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x78, + 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, + 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x14, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, + 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, + 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x25, 0x0a, 0x0e, + 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x18, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x69, 0x6e, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x18, + 0x22, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x12, 0x5f, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x23, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x4b, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x69, 0x6d, 0x70, + 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x2e, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x7a, 0x0a, 0x13, 0x66, 0x69, 0x65, + 
0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x12, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x30, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, + 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x12, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x61, 0x74, + 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x77, 0x0a, 0x15, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, + 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x52, 0x52, 0x41, 0x59, + 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45, 0x41, 0x4e, 0x10, 0x02, 0x12, + 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, + 0x4e, 0x55, 0x4c, 0x4c, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, + 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x10, 0x06, 0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, + 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, + 0x10, 0x13, 0x4a, 0x04, 0x08, 0x13, 0x10, 0x14, 0x4a, 0x04, 0x08, 0x17, 0x10, 0x18, 0x4a, 0x04, + 0x08, 0x1b, 0x10, 0x1c, 0x4a, 0x04, 0x08, 0x1c, 0x10, 0x1d, 0x4a, 0x04, 0x08, 0x1d, 0x10, 0x1e, + 0x4a, 0x04, 0x08, 0x1e, 0x10, 0x22, 0x4a, 0x04, 0x08, 0x25, 0x10, 0x2a, 0x4a, 0x04, 0x08, 0x2a, + 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x2b, 0x10, 0x2e, 0x22, 0xd9, 0x02, 0x0a, 0x03, 0x54, 0x61, 0x67, + 0x12, 0x12, 0x0a, 0x04, 
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x5e, 0x0a, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61, + 0x67, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, + 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf7, 0x01, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x68, 0x0a, 0x08, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4c, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x73, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x1a, 0x76, 0x0a, 0x0d, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4f, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, + 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 
0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xff, + 0x06, 0x0a, 0x0e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x65, 0x12, 0x52, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, + 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4c, 0x0a, 0x02, 0x69, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x65, 0x2e, 0x49, 0x6e, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x52, 0x0a, 0x04, 0x66, 0x6c, 0x6f, + 0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x65, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x2b, 0x0a, + 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, + 0x72, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, + 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, + 0x65, 0x73, 0x12, 0x69, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 
0x73, 0x1a, 0x55, 0x0a, + 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4b, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0e, + 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x53, 0x49, 0x43, 0x10, 0x01, 0x12, 0x10, + 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x02, + 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x41, 0x55, 0x54, 0x48, 0x32, 0x10, + 0x03, 0x22, 0x31, 0x0a, 0x02, 0x49, 0x6e, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x4e, 0x5f, 0x49, 0x4e, + 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x5f, 0x51, 0x55, + 0x45, 0x52, 0x59, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x49, 0x4e, 0x5f, 0x48, 0x45, 0x41, 0x44, + 0x45, 0x52, 0x10, 0x02, 0x22, 0x6a, 0x0a, 0x04, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x10, 0x0a, 0x0c, + 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x11, + 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, + 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x50, 0x41, 0x53, 0x53, 0x57, 0x4f, + 0x52, 0x44, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x41, 0x50, 0x50, + 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c, + 0x4f, 0x57, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x04, + 0x22, 0xf6, 0x02, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x8a, 0x01, 0x0a, 0x14, 0x73, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x1a, 0x30, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x9f, 0x01, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75, + 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 
0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, + 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x96, 0x01, 0x0a, 0x06, 0x53, 0x63, + 0x6f, 0x70, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x38, 0x0a, 0x0a, 0x53, 0x63, 0x6f, 0x70, + 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x2a, 0x3b, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x0b, 0x0a, 0x07, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x54, 0x54, + 0x50, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x02, 0x12, 0x06, + 0x0a, 0x02, 0x57, 0x53, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x57, 0x53, 0x53, 0x10, 0x04, 0x42, + 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, + 0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70, + 0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes = make([]protoimpl.EnumInfo, 6) +var file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes = make([]protoimpl.MessageInfo, 35) +var file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = []any{ + (Scheme)(0), // 0: grpc.gateway.protoc_gen_openapiv2.options.Scheme + (HeaderParameter_Type)(0), // 1: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type + (JSONSchema_JSONSchemaSimpleTypes)(0), // 2: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes + (SecurityScheme_Type)(0), // 3: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type + (SecurityScheme_In)(0), // 4: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In + (SecurityScheme_Flow)(0), // 5: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow + (*Swagger)(nil), // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger + (*Operation)(nil), // 7: 
grpc.gateway.protoc_gen_openapiv2.options.Operation + (*Parameters)(nil), // 8: grpc.gateway.protoc_gen_openapiv2.options.Parameters + (*HeaderParameter)(nil), // 9: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter + (*Header)(nil), // 10: grpc.gateway.protoc_gen_openapiv2.options.Header + (*Response)(nil), // 11: grpc.gateway.protoc_gen_openapiv2.options.Response + (*Info)(nil), // 12: grpc.gateway.protoc_gen_openapiv2.options.Info + (*Contact)(nil), // 13: grpc.gateway.protoc_gen_openapiv2.options.Contact + (*License)(nil), // 14: grpc.gateway.protoc_gen_openapiv2.options.License + (*ExternalDocumentation)(nil), // 15: grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + (*Schema)(nil), // 16: grpc.gateway.protoc_gen_openapiv2.options.Schema + (*EnumSchema)(nil), // 17: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema + (*JSONSchema)(nil), // 18: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema + (*Tag)(nil), // 19: grpc.gateway.protoc_gen_openapiv2.options.Tag + (*SecurityDefinitions)(nil), // 20: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions + (*SecurityScheme)(nil), // 21: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme + (*SecurityRequirement)(nil), // 22: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement + (*Scopes)(nil), // 23: grpc.gateway.protoc_gen_openapiv2.options.Scopes + nil, // 24: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry + nil, // 25: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry + nil, // 26: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry + nil, // 27: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry + nil, // 28: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry + nil, // 29: grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry + nil, // 30: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry + nil, // 31: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry + nil, // 32: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry + (*JSONSchema_FieldConfiguration)(nil), // 33: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration + nil, // 34: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry + nil, // 35: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry + nil, // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry + nil, // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry + (*SecurityRequirement_SecurityRequirementValue)(nil), // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue + nil, // 39: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry + nil, // 40: grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry + (*structpb.Value)(nil), // 41: google.protobuf.Value +} +var file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = []int32{ + 12, // 0: grpc.gateway.protoc_gen_openapiv2.options.Swagger.info:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info + 0, // 1: grpc.gateway.protoc_gen_openapiv2.options.Swagger.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme + 24, // 2: grpc.gateway.protoc_gen_openapiv2.options.Swagger.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry + 20, // 3: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security_definitions:type_name -> 
grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions + 22, // 4: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement + 19, // 5: grpc.gateway.protoc_gen_openapiv2.options.Swagger.tags:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag + 15, // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 25, // 7: grpc.gateway.protoc_gen_openapiv2.options.Swagger.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry + 15, // 8: grpc.gateway.protoc_gen_openapiv2.options.Operation.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 26, // 9: grpc.gateway.protoc_gen_openapiv2.options.Operation.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry + 0, // 10: grpc.gateway.protoc_gen_openapiv2.options.Operation.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme + 22, // 11: grpc.gateway.protoc_gen_openapiv2.options.Operation.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement + 27, // 12: grpc.gateway.protoc_gen_openapiv2.options.Operation.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry + 8, // 13: grpc.gateway.protoc_gen_openapiv2.options.Operation.parameters:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Parameters + 9, // 14: grpc.gateway.protoc_gen_openapiv2.options.Parameters.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter + 1, // 15: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type + 16, // 16: grpc.gateway.protoc_gen_openapiv2.options.Response.schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema + 28, // 17: grpc.gateway.protoc_gen_openapiv2.options.Response.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry + 29, // 18: grpc.gateway.protoc_gen_openapiv2.options.Response.examples:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry + 30, // 19: grpc.gateway.protoc_gen_openapiv2.options.Response.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry + 13, // 20: grpc.gateway.protoc_gen_openapiv2.options.Info.contact:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Contact + 14, // 21: grpc.gateway.protoc_gen_openapiv2.options.Info.license:type_name -> grpc.gateway.protoc_gen_openapiv2.options.License + 31, // 22: grpc.gateway.protoc_gen_openapiv2.options.Info.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry + 18, // 23: grpc.gateway.protoc_gen_openapiv2.options.Schema.json_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema + 15, // 24: grpc.gateway.protoc_gen_openapiv2.options.Schema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 15, // 25: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 32, // 26: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry + 2, // 27: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.type:type_name -> 
grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes + 33, // 28: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.field_configuration:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration + 34, // 29: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry + 15, // 30: grpc.gateway.protoc_gen_openapiv2.options.Tag.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 35, // 31: grpc.gateway.protoc_gen_openapiv2.options.Tag.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry + 36, // 32: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry + 3, // 33: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type + 4, // 34: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.in:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In + 5, // 35: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.flow:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow + 23, // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.scopes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes + 37, // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry + 39, // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.security_requirement:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry + 40, // 39: grpc.gateway.protoc_gen_openapiv2.options.Scopes.scope:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry + 11, // 40: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response + 41, // 41: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry.value:type_name -> google.protobuf.Value + 11, // 42: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response + 41, // 43: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry.value:type_name -> google.protobuf.Value + 10, // 44: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Header + 41, // 45: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry.value:type_name -> google.protobuf.Value + 41, // 46: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry.value:type_name -> google.protobuf.Value + 41, // 47: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value + 41, // 48: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value + 41, // 49: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry.value:type_name -> google.protobuf.Value + 21, // 50: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme + 41, // 51: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry.value:type_name -> google.protobuf.Value + 38, // 52: 
grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue + 53, // [53:53] is the sub-list for method output_type + 53, // [53:53] is the sub-list for method input_type + 53, // [53:53] is the sub-list for extension type_name + 53, // [53:53] is the sub-list for extension extendee + 0, // [0:53] is the sub-list for field type_name +} + +func init() { file_protoc_gen_openapiv2_options_openapiv2_proto_init() } +func file_protoc_gen_openapiv2_options_openapiv2_proto_init() { + if File_protoc_gen_openapiv2_options_openapiv2_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc, + NumEnums: 6, + NumMessages: 35, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes, + DependencyIndexes: file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs, + EnumInfos: file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes, + MessageInfos: file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes, + }.Build() + File_protoc_gen_openapiv2_options_openapiv2_proto = out.File + file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = nil + file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = nil + file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go index 0fa90765661..f0727cf7c06 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -196,7 +196,7 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha return } - if !doForwardTrailers { + if !doForwardTrailers && mux.writeContentLength { w.Header().Set("Content-Length", strconv.Itoa(len(buf))) } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go index 60c2065ddcb..19255ec441e 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go @@ -71,6 +71,7 @@ type ServeMux struct { routingErrorHandler RoutingErrorHandlerFunc disablePathLengthFallback bool unescapingMode UnescapingMode + writeContentLength bool } // ServeMuxOption is an option that can be given to a ServeMux on construction. @@ -258,6 +259,13 @@ func WithDisablePathLengthFallback() ServeMuxOption { } } +// WithWriteContentLength returns a ServeMuxOption to enable writing content length on non-streaming responses +func WithWriteContentLength() ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.writeContentLength = true + } +} + // WithHealthEndpointAt returns a ServeMuxOption that will add an endpoint to the created ServeMux at the path specified by endpointPath. // When called the handler will forward the request to the upstream grpc service health check (defined in the // gRPC Health Checking Protocol). 
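Note on the runtime change above: the Content-Length header on non-streaming responses is now opt-in via the new WithWriteContentLength ServeMuxOption (without it, the gateway omits the header when trailers are not forwarded). A minimal sketch of enabling it when constructing a gateway mux; the handler registration is elided because it depends on the generated proto stubs in use:

    package main

    import (
    	"log"
    	"net/http"

    	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
    )

    func main() {
    	// Opt in to Content-Length on non-streaming responses. After this
    	// bump, ForwardResponseMessage only sets the header when this
    	// option is enabled and trailers are not being forwarded.
    	mux := runtime.NewServeMux(runtime.WithWriteContentLength())

    	// Generated RegisterXxxHandlerFromEndpoint calls would be wired
    	// up here (hypothetical; depends on the service definition).

    	log.Fatal(http.ListenAndServe(":8080", mux))
    }
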
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go index 93fb09922fb..8549dfb97af 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go @@ -126,6 +126,15 @@ func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []strin } } + // Check if oneof already set + if of := fieldDescriptor.ContainingOneof(); of != nil && !of.IsSynthetic() { + if f := msgValue.WhichOneof(of); f != nil { + if fieldDescriptor.Message() == nil || fieldDescriptor.FullName() != f.FullName() { + return fmt.Errorf("field already set for oneof %q", of.FullName().Name()) + } + } + } + // If this is the last element, we're done if i == len(fieldPath)-1 { break @@ -140,13 +149,6 @@ func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []strin msgValue = msgValue.Mutable(fieldDescriptor).Message() } - // Check if oneof already set - if of := fieldDescriptor.ContainingOneof(); of != nil { - if f := msgValue.WhichOneof(of); f != nil { - return fmt.Errorf("field already set for oneof %q", of.FullName().Name()) - } - } - switch { case fieldDescriptor.IsList(): return populateRepeatedField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).List(), values) diff --git a/vendor/github.com/open-policy-agent/opa/ast/annotations.go b/vendor/github.com/open-policy-agent/opa/ast/annotations.go index d6267a0e646..3bc5fb36a51 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/annotations.go +++ b/vendor/github.com/open-policy-agent/opa/ast/annotations.go @@ -5,973 +5,33 @@ package ast import ( - "encoding/json" - "fmt" - "net/url" - "sort" - "strings" - - astJSON "github.com/open-policy-agent/opa/ast/json" - "github.com/open-policy-agent/opa/internal/deepcopy" - "github.com/open-policy-agent/opa/util" -) - -const ( - annotationScopePackage = "package" - annotationScopeImport = "import" - annotationScopeRule = "rule" - annotationScopeDocument = "document" - annotationScopeSubpackages = "subpackages" + v1 "github.com/open-policy-agent/opa/v1/ast" ) type ( // Annotations represents metadata attached to other AST nodes such as rules. - Annotations struct { - Scope string `json:"scope"` - Title string `json:"title,omitempty"` - Entrypoint bool `json:"entrypoint,omitempty"` - Description string `json:"description,omitempty"` - Organizations []string `json:"organizations,omitempty"` - RelatedResources []*RelatedResourceAnnotation `json:"related_resources,omitempty"` - Authors []*AuthorAnnotation `json:"authors,omitempty"` - Schemas []*SchemaAnnotation `json:"schemas,omitempty"` - Custom map[string]interface{} `json:"custom,omitempty"` - Location *Location `json:"location,omitempty"` - - comments []*Comment - node Node - jsonOptions astJSON.Options - } + Annotations = v1.Annotations // SchemaAnnotation contains a schema declaration for the document identified by the path. 
- SchemaAnnotation struct { - Path Ref `json:"path"` - Schema Ref `json:"schema,omitempty"` - Definition *interface{} `json:"definition,omitempty"` - } - - AuthorAnnotation struct { - Name string `json:"name"` - Email string `json:"email,omitempty"` - } - - RelatedResourceAnnotation struct { - Ref url.URL `json:"ref"` - Description string `json:"description,omitempty"` - } - - AnnotationSet struct { - byRule map[*Rule][]*Annotations - byPackage map[int]*Annotations - byPath *annotationTreeNode - modules []*Module // Modules this set was constructed from - } + SchemaAnnotation = v1.SchemaAnnotation - annotationTreeNode struct { - Value *Annotations - Children map[Value]*annotationTreeNode // we assume key elements are hashable (vars and strings only!) - } + AuthorAnnotation = v1.AuthorAnnotation - AnnotationsRef struct { - Path Ref `json:"path"` // The path of the node the annotations are applied to - Annotations *Annotations `json:"annotations,omitempty"` - Location *Location `json:"location,omitempty"` // The location of the node the annotations are applied to + RelatedResourceAnnotation = v1.RelatedResourceAnnotation - jsonOptions astJSON.Options + AnnotationSet = v1.AnnotationSet - node Node // The node the annotations are applied to - } + AnnotationsRef = v1.AnnotationsRef - AnnotationsRefSet []*AnnotationsRef + AnnotationsRefSet = v1.AnnotationsRefSet - FlatAnnotationsRefSet AnnotationsRefSet + FlatAnnotationsRefSet = v1.FlatAnnotationsRefSet ) -func (a *Annotations) String() string { - bs, _ := a.MarshalJSON() - return string(bs) -} - -// Loc returns the location of this annotation. -func (a *Annotations) Loc() *Location { - return a.Location -} - -// SetLoc updates the location of this annotation. -func (a *Annotations) SetLoc(l *Location) { - a.Location = l -} - -// EndLoc returns the location of this annotation's last comment line. -func (a *Annotations) EndLoc() *Location { - count := len(a.comments) - if count == 0 { - return a.Location - } - return a.comments[count-1].Location -} - -// Compare returns an integer indicating if a is less than, equal to, or greater -// than other. 
-func (a *Annotations) Compare(other *Annotations) int { - - if a == nil && other == nil { - return 0 - } - - if a == nil { - return -1 - } - - if other == nil { - return 1 - } - - if cmp := scopeCompare(a.Scope, other.Scope); cmp != 0 { - return cmp - } - - if cmp := strings.Compare(a.Title, other.Title); cmp != 0 { - return cmp - } - - if cmp := strings.Compare(a.Description, other.Description); cmp != 0 { - return cmp - } - - if cmp := compareStringLists(a.Organizations, other.Organizations); cmp != 0 { - return cmp - } - - if cmp := compareRelatedResources(a.RelatedResources, other.RelatedResources); cmp != 0 { - return cmp - } - - if cmp := compareAuthors(a.Authors, other.Authors); cmp != 0 { - return cmp - } - - if cmp := compareSchemas(a.Schemas, other.Schemas); cmp != 0 { - return cmp - } - - if a.Entrypoint != other.Entrypoint { - if a.Entrypoint { - return 1 - } - return -1 - } - - if cmp := util.Compare(a.Custom, other.Custom); cmp != 0 { - return cmp - } - - return 0 -} - -// GetTargetPath returns the path of the node these Annotations are applied to (the target) -func (a *Annotations) GetTargetPath() Ref { - switch n := a.node.(type) { - case *Package: - return n.Path - case *Rule: - return n.Ref().GroundPrefix() - default: - return nil - } -} - -func (a *Annotations) setJSONOptions(opts astJSON.Options) { - a.jsonOptions = opts - if a.Location != nil { - a.Location.JSONOptions = opts - } -} - -func (a *Annotations) MarshalJSON() ([]byte, error) { - if a == nil { - return []byte(`{"scope":""}`), nil - } - - data := map[string]interface{}{ - "scope": a.Scope, - } - - if a.Title != "" { - data["title"] = a.Title - } - - if a.Description != "" { - data["description"] = a.Description - } - - if a.Entrypoint { - data["entrypoint"] = a.Entrypoint - } - - if len(a.Organizations) > 0 { - data["organizations"] = a.Organizations - } - - if len(a.RelatedResources) > 0 { - data["related_resources"] = a.RelatedResources - } - - if len(a.Authors) > 0 { - data["authors"] = a.Authors - } - - if len(a.Schemas) > 0 { - data["schemas"] = a.Schemas - } - - if len(a.Custom) > 0 { - data["custom"] = a.Custom - } - - if a.jsonOptions.MarshalOptions.IncludeLocation.Annotations { - if a.Location != nil { - data["location"] = a.Location - } - } - - return json.Marshal(data) -} - func NewAnnotationsRef(a *Annotations) *AnnotationsRef { - var loc *Location - if a.node != nil { - loc = a.node.Loc() - } - - return &AnnotationsRef{ - Location: loc, - Path: a.GetTargetPath(), - Annotations: a, - node: a.node, - jsonOptions: a.jsonOptions, - } -} - -func (ar *AnnotationsRef) GetPackage() *Package { - switch n := ar.node.(type) { - case *Package: - return n - case *Rule: - return n.Module.Package - default: - return nil - } -} - -func (ar *AnnotationsRef) GetRule() *Rule { - switch n := ar.node.(type) { - case *Rule: - return n - default: - return nil - } -} - -func (ar *AnnotationsRef) MarshalJSON() ([]byte, error) { - data := map[string]interface{}{ - "path": ar.Path, - } - - if ar.Annotations != nil { - data["annotations"] = ar.Annotations - } - - if ar.jsonOptions.MarshalOptions.IncludeLocation.AnnotationsRef { - if ar.Location != nil { - data["location"] = ar.Location - } - } - - return json.Marshal(data) -} - -func scopeCompare(s1, s2 string) int { - - o1 := scopeOrder(s1) - o2 := scopeOrder(s2) - - if o2 < o1 { - return 1 - } else if o2 > o1 { - return -1 - } - - if s1 < s2 { - return -1 - } else if s2 < s1 { - return 1 - } - - return 0 -} - -func scopeOrder(s string) int { - switch s { - case 
annotationScopeRule: - return 1 - } - return 0 -} - -func compareAuthors(a, b []*AuthorAnnotation) int { - if len(a) > len(b) { - return 1 - } else if len(a) < len(b) { - return -1 - } - - for i := 0; i < len(a); i++ { - if cmp := a[i].Compare(b[i]); cmp != 0 { - return cmp - } - } - - return 0 -} - -func compareRelatedResources(a, b []*RelatedResourceAnnotation) int { - if len(a) > len(b) { - return 1 - } else if len(a) < len(b) { - return -1 - } - - for i := 0; i < len(a); i++ { - if cmp := strings.Compare(a[i].String(), b[i].String()); cmp != 0 { - return cmp - } - } - - return 0 -} - -func compareSchemas(a, b []*SchemaAnnotation) int { - maxLen := len(a) - if len(b) < maxLen { - maxLen = len(b) - } - - for i := 0; i < maxLen; i++ { - if cmp := a[i].Compare(b[i]); cmp != 0 { - return cmp - } - } - - if len(a) > len(b) { - return 1 - } else if len(a) < len(b) { - return -1 - } - - return 0 -} - -func compareStringLists(a, b []string) int { - if len(a) > len(b) { - return 1 - } else if len(a) < len(b) { - return -1 - } - - for i := 0; i < len(a); i++ { - if cmp := strings.Compare(a[i], b[i]); cmp != 0 { - return cmp - } - } - - return 0 -} - -// Copy returns a deep copy of s. -func (a *Annotations) Copy(node Node) *Annotations { - cpy := *a - - cpy.Organizations = make([]string, len(a.Organizations)) - copy(cpy.Organizations, a.Organizations) - - cpy.RelatedResources = make([]*RelatedResourceAnnotation, len(a.RelatedResources)) - for i := range a.RelatedResources { - cpy.RelatedResources[i] = a.RelatedResources[i].Copy() - } - - cpy.Authors = make([]*AuthorAnnotation, len(a.Authors)) - for i := range a.Authors { - cpy.Authors[i] = a.Authors[i].Copy() - } - - cpy.Schemas = make([]*SchemaAnnotation, len(a.Schemas)) - for i := range a.Schemas { - cpy.Schemas[i] = a.Schemas[i].Copy() - } - - cpy.Custom = deepcopy.Map(a.Custom) - - cpy.node = node - - return &cpy -} - -// toObject constructs an AST Object from the annotation. 
-func (a *Annotations) toObject() (*Object, *Error) { - obj := NewObject() - - if a == nil { - return &obj, nil - } - - if len(a.Scope) > 0 { - obj.Insert(StringTerm("scope"), StringTerm(a.Scope)) - } - - if len(a.Title) > 0 { - obj.Insert(StringTerm("title"), StringTerm(a.Title)) - } - - if a.Entrypoint { - obj.Insert(StringTerm("entrypoint"), BooleanTerm(true)) - } - - if len(a.Description) > 0 { - obj.Insert(StringTerm("description"), StringTerm(a.Description)) - } - - if len(a.Organizations) > 0 { - orgs := make([]*Term, 0, len(a.Organizations)) - for _, org := range a.Organizations { - orgs = append(orgs, StringTerm(org)) - } - obj.Insert(StringTerm("organizations"), ArrayTerm(orgs...)) - } - - if len(a.RelatedResources) > 0 { - rrs := make([]*Term, 0, len(a.RelatedResources)) - for _, rr := range a.RelatedResources { - rrObj := NewObject(Item(StringTerm("ref"), StringTerm(rr.Ref.String()))) - if len(rr.Description) > 0 { - rrObj.Insert(StringTerm("description"), StringTerm(rr.Description)) - } - rrs = append(rrs, NewTerm(rrObj)) - } - obj.Insert(StringTerm("related_resources"), ArrayTerm(rrs...)) - } - - if len(a.Authors) > 0 { - as := make([]*Term, 0, len(a.Authors)) - for _, author := range a.Authors { - aObj := NewObject() - if len(author.Name) > 0 { - aObj.Insert(StringTerm("name"), StringTerm(author.Name)) - } - if len(author.Email) > 0 { - aObj.Insert(StringTerm("email"), StringTerm(author.Email)) - } - as = append(as, NewTerm(aObj)) - } - obj.Insert(StringTerm("authors"), ArrayTerm(as...)) - } - - if len(a.Schemas) > 0 { - ss := make([]*Term, 0, len(a.Schemas)) - for _, s := range a.Schemas { - sObj := NewObject() - if len(s.Path) > 0 { - sObj.Insert(StringTerm("path"), NewTerm(s.Path.toArray())) - } - if len(s.Schema) > 0 { - sObj.Insert(StringTerm("schema"), NewTerm(s.Schema.toArray())) - } - if s.Definition != nil { - def, err := InterfaceToValue(s.Definition) - if err != nil { - return nil, NewError(CompileErr, a.Location, "invalid definition in schema annotation: %s", err.Error()) - } - sObj.Insert(StringTerm("definition"), NewTerm(def)) - } - ss = append(ss, NewTerm(sObj)) - } - obj.Insert(StringTerm("schemas"), ArrayTerm(ss...)) - } - - if len(a.Custom) > 0 { - c, err := InterfaceToValue(a.Custom) - if err != nil { - return nil, NewError(CompileErr, a.Location, "invalid custom annotation %s", err.Error()) - } - obj.Insert(StringTerm("custom"), NewTerm(c)) - } - - return &obj, nil -} - -func attachRuleAnnotations(mod *Module) { - // make a copy of the annotations - cpy := make([]*Annotations, len(mod.Annotations)) - for i, a := range mod.Annotations { - cpy[i] = a.Copy(a.node) - } - - for _, rule := range mod.Rules { - var j int - var found bool - for i, a := range cpy { - if rule.Ref().GroundPrefix().Equal(a.GetTargetPath()) { - if a.Scope == annotationScopeDocument { - rule.Annotations = append(rule.Annotations, a) - } else if a.Scope == annotationScopeRule && rule.Loc().Row > a.Location.Row { - j = i - found = true - rule.Annotations = append(rule.Annotations, a) - } - } - } - - if found && j < len(cpy) { - cpy = append(cpy[:j], cpy[j+1:]...) - } - } -} - -func attachAnnotationsNodes(mod *Module) Errors { - var errs Errors - - // Find first non-annotation statement following each annotation and attach - // the annotation to that statement. 
- for _, a := range mod.Annotations { - for _, stmt := range mod.stmts { - _, ok := stmt.(*Annotations) - if !ok { - if stmt.Loc().Row > a.Location.Row { - a.node = stmt - break - } - } - } - - if a.Scope == "" { - switch a.node.(type) { - case *Rule: - if a.Entrypoint { - a.Scope = annotationScopeDocument - } else { - a.Scope = annotationScopeRule - } - case *Package: - a.Scope = annotationScopePackage - case *Import: - a.Scope = annotationScopeImport - } - } - - if err := validateAnnotationScopeAttachment(a); err != nil { - errs = append(errs, err) - } - - if err := validateAnnotationEntrypointAttachment(a); err != nil { - errs = append(errs, err) - } - } - - return errs -} - -func validateAnnotationScopeAttachment(a *Annotations) *Error { - - switch a.Scope { - case annotationScopeRule, annotationScopeDocument: - if _, ok := a.node.(*Rule); ok { - return nil - } - return newScopeAttachmentErr(a, "rule") - case annotationScopePackage, annotationScopeSubpackages: - if _, ok := a.node.(*Package); ok { - return nil - } - return newScopeAttachmentErr(a, "package") - } - - return NewError(ParseErr, a.Loc(), "invalid annotation scope '%v'. Use one of '%s', '%s', '%s', or '%s'", - a.Scope, annotationScopeRule, annotationScopeDocument, annotationScopePackage, annotationScopeSubpackages) -} - -func validateAnnotationEntrypointAttachment(a *Annotations) *Error { - if a.Entrypoint && !(a.Scope == annotationScopeDocument || a.Scope == annotationScopePackage) { - return NewError( - ParseErr, a.Loc(), "annotation entrypoint applied to non-document or package scope '%v'", a.Scope) - } - return nil -} - -// Copy returns a deep copy of a. -func (a *AuthorAnnotation) Copy() *AuthorAnnotation { - cpy := *a - return &cpy -} - -// Compare returns an integer indicating if s is less than, equal to, or greater -// than other. -func (a *AuthorAnnotation) Compare(other *AuthorAnnotation) int { - if cmp := strings.Compare(a.Name, other.Name); cmp != 0 { - return cmp - } - - if cmp := strings.Compare(a.Email, other.Email); cmp != 0 { - return cmp - } - - return 0 -} - -func (a *AuthorAnnotation) String() string { - if len(a.Email) == 0 { - return a.Name - } else if len(a.Name) == 0 { - return fmt.Sprintf("<%s>", a.Email) - } - return fmt.Sprintf("%s <%s>", a.Name, a.Email) -} - -// Copy returns a deep copy of rr. -func (rr *RelatedResourceAnnotation) Copy() *RelatedResourceAnnotation { - cpy := *rr - return &cpy -} - -// Compare returns an integer indicating if s is less than, equal to, or greater -// than other. -func (rr *RelatedResourceAnnotation) Compare(other *RelatedResourceAnnotation) int { - if cmp := strings.Compare(rr.Description, other.Description); cmp != 0 { - return cmp - } - - if cmp := strings.Compare(rr.Ref.String(), other.Ref.String()); cmp != 0 { - return cmp - } - - return 0 -} - -func (rr *RelatedResourceAnnotation) String() string { - bs, _ := json.Marshal(rr) - return string(bs) -} - -func (rr *RelatedResourceAnnotation) MarshalJSON() ([]byte, error) { - d := map[string]interface{}{ - "ref": rr.Ref.String(), - } - - if len(rr.Description) > 0 { - d["description"] = rr.Description - } - - return json.Marshal(d) -} - -// Copy returns a deep copy of s. -func (s *SchemaAnnotation) Copy() *SchemaAnnotation { - cpy := *s - return &cpy -} - -// Compare returns an integer indicating if s is less than, equal to, or greater -// than other. 
-func (s *SchemaAnnotation) Compare(other *SchemaAnnotation) int { - - if cmp := s.Path.Compare(other.Path); cmp != 0 { - return cmp - } - - if cmp := s.Schema.Compare(other.Schema); cmp != 0 { - return cmp - } - - if s.Definition != nil && other.Definition == nil { - return -1 - } else if s.Definition == nil && other.Definition != nil { - return 1 - } else if s.Definition != nil && other.Definition != nil { - return util.Compare(*s.Definition, *other.Definition) - } - - return 0 -} - -func (s *SchemaAnnotation) String() string { - bs, _ := json.Marshal(s) - return string(bs) -} - -func newAnnotationSet() *AnnotationSet { - return &AnnotationSet{ - byRule: map[*Rule][]*Annotations{}, - byPackage: map[int]*Annotations{}, - byPath: newAnnotationTree(), - } + return v1.NewAnnotationsRef(a) } func BuildAnnotationSet(modules []*Module) (*AnnotationSet, Errors) { - as := newAnnotationSet() - var errs Errors - for _, m := range modules { - for _, a := range m.Annotations { - if err := as.add(a); err != nil { - errs = append(errs, err) - } - } - } - if len(errs) > 0 { - return nil, errs - } - as.modules = modules - return as, nil -} - -// NOTE(philipc): During copy propagation, the underlying Nodes can be -// stripped away from the annotations, leading to nil deref panics. We -// silently ignore these cases for now, as a workaround. -func (as *AnnotationSet) add(a *Annotations) *Error { - switch a.Scope { - case annotationScopeRule: - if rule, ok := a.node.(*Rule); ok { - as.byRule[rule] = append(as.byRule[rule], a) - } - case annotationScopePackage: - if pkg, ok := a.node.(*Package); ok { - hash := pkg.Path.Hash() - if exist, ok := as.byPackage[hash]; ok { - return errAnnotationRedeclared(a, exist.Location) - } - as.byPackage[hash] = a - } - case annotationScopeDocument: - if rule, ok := a.node.(*Rule); ok { - path := rule.Ref().GroundPrefix() - x := as.byPath.get(path) - if x != nil { - return errAnnotationRedeclared(a, x.Value.Location) - } - as.byPath.insert(path, a) - } - case annotationScopeSubpackages: - if pkg, ok := a.node.(*Package); ok { - x := as.byPath.get(pkg.Path) - if x != nil && x.Value != nil { - return errAnnotationRedeclared(a, x.Value.Location) - } - as.byPath.insert(pkg.Path, a) - } - } - return nil -} - -func (as *AnnotationSet) GetRuleScope(r *Rule) []*Annotations { - if as == nil { - return nil - } - return as.byRule[r] -} - -func (as *AnnotationSet) GetSubpackagesScope(path Ref) []*Annotations { - if as == nil { - return nil - } - return as.byPath.ancestors(path) -} - -func (as *AnnotationSet) GetDocumentScope(path Ref) *Annotations { - if as == nil { - return nil - } - if node := as.byPath.get(path); node != nil { - return node.Value - } - return nil -} - -func (as *AnnotationSet) GetPackageScope(pkg *Package) *Annotations { - if as == nil { - return nil - } - return as.byPackage[pkg.Path.Hash()] -} - -// Flatten returns a flattened list view of this AnnotationSet. -// The returned slice is sorted, first by the annotations' target path, then by their target location -func (as *AnnotationSet) Flatten() FlatAnnotationsRefSet { - // This preallocation often won't be optimal, but it's superior to starting with a nil slice. 
- refs := make([]*AnnotationsRef, 0, len(as.byPath.Children)+len(as.byRule)+len(as.byPackage)) - - refs = as.byPath.flatten(refs) - - for _, a := range as.byPackage { - refs = append(refs, NewAnnotationsRef(a)) - } - - for _, as := range as.byRule { - for _, a := range as { - refs = append(refs, NewAnnotationsRef(a)) - } - } - - // Sort by path, then annotation location, for stable output - sort.SliceStable(refs, func(i, j int) bool { - return refs[i].Compare(refs[j]) < 0 - }) - - return refs -} - -// Chain returns the chain of annotations leading up to the given rule. -// The returned slice is ordered as follows -// 0. Entries for the given rule, ordered from the METADATA block declared immediately above the rule, to the block declared farthest away (always at least one entry) -// 1. The 'document' scope entry, if any -// 2. The 'package' scope entry, if any -// 3. Entries for the 'subpackages' scope, if any; ordered from the closest package path to the fartest. E.g.: 'do.re.mi', 'do.re', 'do' -// The returned slice is guaranteed to always contain at least one entry, corresponding to the given rule. -func (as *AnnotationSet) Chain(rule *Rule) AnnotationsRefSet { - var refs []*AnnotationsRef - - ruleAnnots := as.GetRuleScope(rule) - - if len(ruleAnnots) >= 1 { - for _, a := range ruleAnnots { - refs = append(refs, NewAnnotationsRef(a)) - } - } else { - // Make sure there is always a leading entry representing the passed rule, even if it has no annotations - refs = append(refs, &AnnotationsRef{ - Location: rule.Location, - Path: rule.Ref().GroundPrefix(), - node: rule, - }) - } - - if len(refs) > 1 { - // Sort by annotation location; chain must start with annotations declared closest to rule, then going outward - sort.SliceStable(refs, func(i, j int) bool { - return refs[i].Annotations.Location.Compare(refs[j].Annotations.Location) > 0 - }) - } - - docAnnots := as.GetDocumentScope(rule.Ref().GroundPrefix()) - if docAnnots != nil { - refs = append(refs, NewAnnotationsRef(docAnnots)) - } - - pkg := rule.Module.Package - pkgAnnots := as.GetPackageScope(pkg) - if pkgAnnots != nil { - refs = append(refs, NewAnnotationsRef(pkgAnnots)) - } - - subPkgAnnots := as.GetSubpackagesScope(pkg.Path) - // We need to reverse the order, as subPkgAnnots ordering will start at the root, - // whereas we want to end at the root. - for i := len(subPkgAnnots) - 1; i >= 0; i-- { - refs = append(refs, NewAnnotationsRef(subPkgAnnots[i])) - } - - return refs -} - -func (ars FlatAnnotationsRefSet) Insert(ar *AnnotationsRef) FlatAnnotationsRefSet { - result := make(FlatAnnotationsRefSet, 0, len(ars)+1) - - // insertion sort, first by path, then location - for i, current := range ars { - if ar.Compare(current) < 0 { - result = append(result, ar) - result = append(result, ars[i:]...) 
- break - } - result = append(result, current) - } - - if len(result) < len(ars)+1 { - result = append(result, ar) - } - - return result -} - -func newAnnotationTree() *annotationTreeNode { - return &annotationTreeNode{ - Value: nil, - Children: map[Value]*annotationTreeNode{}, - } -} - -func (t *annotationTreeNode) insert(path Ref, value *Annotations) { - node := t - for _, k := range path { - child, ok := node.Children[k.Value] - if !ok { - child = newAnnotationTree() - node.Children[k.Value] = child - } - node = child - } - node.Value = value -} - -func (t *annotationTreeNode) get(path Ref) *annotationTreeNode { - node := t - for _, k := range path { - if node == nil { - return nil - } - child, ok := node.Children[k.Value] - if !ok { - return nil - } - node = child - } - return node -} - -// ancestors returns a slice of annotations in ascending order, starting with the root of ref; e.g.: 'root', 'root.foo', 'root.foo.bar'. -func (t *annotationTreeNode) ancestors(path Ref) (result []*Annotations) { - node := t - for _, k := range path { - if node == nil { - return result - } - child, ok := node.Children[k.Value] - if !ok { - return result - } - if child.Value != nil { - result = append(result, child.Value) - } - node = child - } - return result -} - -func (t *annotationTreeNode) flatten(refs []*AnnotationsRef) []*AnnotationsRef { - if a := t.Value; a != nil { - refs = append(refs, NewAnnotationsRef(a)) - } - for _, c := range t.Children { - refs = c.flatten(refs) - } - return refs -} - -func (ar *AnnotationsRef) Compare(other *AnnotationsRef) int { - if c := ar.Path.Compare(other.Path); c != 0 { - return c - } - - if c := ar.Annotations.Location.Compare(other.Annotations.Location); c != 0 { - return c - } - - return ar.Annotations.Compare(other.Annotations) + return v1.BuildAnnotationSet(modules) } diff --git a/vendor/github.com/open-policy-agent/opa/ast/builtins.go b/vendor/github.com/open-policy-agent/opa/ast/builtins.go index f54d91d3172..d0ab69a1630 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/builtins.go +++ b/vendor/github.com/open-policy-agent/opa/ast/builtins.go @@ -5,1348 +5,230 @@ package ast import ( - "strings" - - "github.com/open-policy-agent/opa/types" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // Builtins is the registry of built-in functions supported by OPA. // Call RegisterBuiltin to add a new built-in. -var Builtins []*Builtin +var Builtins = v1.Builtins // RegisterBuiltin adds a new built-in function to the registry. func RegisterBuiltin(b *Builtin) { - Builtins = append(Builtins, b) - BuiltinMap[b.Name] = b - if len(b.Infix) > 0 { - BuiltinMap[b.Infix] = b - } + v1.RegisterBuiltin(b) } // DefaultBuiltins is the registry of built-in functions supported in OPA // by default. When adding a new built-in function to OPA, update this // list. 
-var DefaultBuiltins = [...]*Builtin{ - // Unification/equality ("=") - Equality, - - // Assignment (":=") - Assign, - - // Membership, infix "in": `x in xs` - Member, - MemberWithKey, - - // Comparisons - GreaterThan, - GreaterThanEq, - LessThan, - LessThanEq, - NotEqual, - Equal, - - // Arithmetic - Plus, - Minus, - Multiply, - Divide, - Ceil, - Floor, - Round, - Abs, - Rem, - - // Bitwise Arithmetic - BitsOr, - BitsAnd, - BitsNegate, - BitsXOr, - BitsShiftLeft, - BitsShiftRight, - - // Binary - And, - Or, - - // Aggregates - Count, - Sum, - Product, - Max, - Min, - Any, - All, - - // Arrays - ArrayConcat, - ArraySlice, - ArrayReverse, - - // Conversions - ToNumber, - - // Casts (DEPRECATED) - CastObject, - CastNull, - CastBoolean, - CastString, - CastSet, - CastArray, - - // Regular Expressions - RegexIsValid, - RegexMatch, - RegexMatchDeprecated, - RegexSplit, - GlobsMatch, - RegexTemplateMatch, - RegexFind, - RegexFindAllStringSubmatch, - RegexReplace, - - // Sets - SetDiff, - Intersection, - Union, - - // Strings - AnyPrefixMatch, - AnySuffixMatch, - Concat, - FormatInt, - IndexOf, - IndexOfN, - Substring, - Lower, - Upper, - Contains, - StringCount, - StartsWith, - EndsWith, - Split, - Replace, - ReplaceN, - Trim, - TrimLeft, - TrimPrefix, - TrimRight, - TrimSuffix, - TrimSpace, - Sprintf, - StringReverse, - RenderTemplate, - - // Numbers - NumbersRange, - NumbersRangeStep, - RandIntn, - - // Encoding - JSONMarshal, - JSONMarshalWithOptions, - JSONUnmarshal, - JSONIsValid, - Base64Encode, - Base64Decode, - Base64IsValid, - Base64UrlEncode, - Base64UrlEncodeNoPad, - Base64UrlDecode, - URLQueryDecode, - URLQueryEncode, - URLQueryEncodeObject, - URLQueryDecodeObject, - YAMLMarshal, - YAMLUnmarshal, - YAMLIsValid, - HexEncode, - HexDecode, - - // Object Manipulation - ObjectUnion, - ObjectUnionN, - ObjectRemove, - ObjectFilter, - ObjectGet, - ObjectKeys, - ObjectSubset, - - // JSON Object Manipulation - JSONFilter, - JSONRemove, - JSONPatch, - - // Tokens - JWTDecode, - JWTVerifyRS256, - JWTVerifyRS384, - JWTVerifyRS512, - JWTVerifyPS256, - JWTVerifyPS384, - JWTVerifyPS512, - JWTVerifyES256, - JWTVerifyES384, - JWTVerifyES512, - JWTVerifyHS256, - JWTVerifyHS384, - JWTVerifyHS512, - JWTDecodeVerify, - JWTEncodeSignRaw, - JWTEncodeSign, - - // Time - NowNanos, - ParseNanos, - ParseRFC3339Nanos, - ParseDurationNanos, - Format, - Date, - Clock, - Weekday, - AddDate, - Diff, - - // Crypto - CryptoX509ParseCertificates, - CryptoX509ParseAndVerifyCertificates, - CryptoX509ParseAndVerifyCertificatesWithOptions, - CryptoMd5, - CryptoSha1, - CryptoSha256, - CryptoX509ParseCertificateRequest, - CryptoX509ParseRSAPrivateKey, - CryptoX509ParseKeyPair, - CryptoParsePrivateKeys, - CryptoHmacMd5, - CryptoHmacSha1, - CryptoHmacSha256, - CryptoHmacSha512, - CryptoHmacEqual, - - // Graphs - WalkBuiltin, - ReachableBuiltin, - ReachablePathsBuiltin, - - // Sort - Sort, - - // Types - IsNumber, - IsString, - IsBoolean, - IsArray, - IsSet, - IsObject, - IsNull, - TypeNameBuiltin, - - // HTTP - HTTPSend, - - // GraphQL - GraphQLParse, - GraphQLParseAndVerify, - GraphQLParseQuery, - GraphQLParseSchema, - GraphQLIsValid, - GraphQLSchemaIsValid, - - // JSON Schema - JSONSchemaVerify, - JSONMatchSchema, - - // Cloud Provider Helpers - ProvidersAWSSignReqObj, - - // Rego - RegoParseModule, - RegoMetadataChain, - RegoMetadataRule, - - // OPA - OPARuntime, - - // Tracing - Trace, - - // Networking - NetCIDROverlap, - NetCIDRIntersects, - NetCIDRContains, - NetCIDRContainsMatches, - NetCIDRExpand, - NetCIDRMerge, - 
NetLookupIPAddr, - NetCIDRIsValid, - - // Glob - GlobMatch, - GlobQuoteMeta, - - // Units - UnitsParse, - UnitsParseBytes, - - // UUIDs - UUIDRFC4122, - UUIDParse, - - // SemVers - SemVerIsValid, - SemVerCompare, - - // Printing - Print, - InternalPrint, -} +var DefaultBuiltins = v1.DefaultBuiltins // BuiltinMap provides a convenient mapping of built-in names to // built-in definitions. -var BuiltinMap map[string]*Builtin +var BuiltinMap = v1.BuiltinMap // Deprecated: Builtins can now be directly annotated with the // Nondeterministic property, and when set to true, will be ignored // for partial evaluation. -var IgnoreDuringPartialEval = []*Builtin{ - RandIntn, - UUIDRFC4122, - JWTDecodeVerify, - JWTEncodeSignRaw, - JWTEncodeSign, - NowNanos, - HTTPSend, - OPARuntime, - NetLookupIPAddr, -} +var IgnoreDuringPartialEval = v1.IgnoreDuringPartialEval /** * Unification */ // Equality represents the "=" operator. -var Equality = &Builtin{ - Name: "eq", - Infix: "=", - Decl: types.NewFunction( - types.Args(types.A, types.A), - types.B, - ), -} +var Equality = v1.Equality /** * Assignment */ // Assign represents the assignment (":=") operator. -var Assign = &Builtin{ - Name: "assign", - Infix: ":=", - Decl: types.NewFunction( - types.Args(types.A, types.A), - types.B, - ), -} +var Assign = v1.Assign // Member represents the `in` (infix) operator. -var Member = &Builtin{ - Name: "internal.member_2", - Infix: "in", - Decl: types.NewFunction( - types.Args( - types.A, - types.A, - ), - types.B, - ), -} +var Member = v1.Member // MemberWithKey represents the `in` (infix) operator when used // with two terms on the lhs, i.e., `k, v in obj`. -var MemberWithKey = &Builtin{ - Name: "internal.member_3", - Infix: "in", - Decl: types.NewFunction( - types.Args( - types.A, - types.A, - types.A, - ), - types.B, - ), -} +var MemberWithKey = v1.MemberWithKey -/** - * Comparisons - */ -var comparison = category("comparison") - -var GreaterThan = &Builtin{ - Name: "gt", - Infix: ">", - Categories: comparison, - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - types.Named("y", types.A), - ), - types.Named("result", types.B).Description("true if `x` is greater than `y`; false otherwise"), - ), -} +var GreaterThan = v1.GreaterThan -var GreaterThanEq = &Builtin{ - Name: "gte", - Infix: ">=", - Categories: comparison, - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - types.Named("y", types.A), - ), - types.Named("result", types.B).Description("true if `x` is greater or equal to `y`; false otherwise"), - ), -} +var GreaterThanEq = v1.GreaterThanEq // LessThan represents the "<" comparison operator. 
-var LessThan = &Builtin{ - Name: "lt", - Infix: "<", - Categories: comparison, - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - types.Named("y", types.A), - ), - types.Named("result", types.B).Description("true if `x` is less than `y`; false otherwise"), - ), -} +var LessThan = v1.LessThan -var LessThanEq = &Builtin{ - Name: "lte", - Infix: "<=", - Categories: comparison, - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - types.Named("y", types.A), - ), - types.Named("result", types.B).Description("true if `x` is less than or equal to `y`; false otherwise"), - ), -} +var LessThanEq = v1.LessThanEq -var NotEqual = &Builtin{ - Name: "neq", - Infix: "!=", - Categories: comparison, - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - types.Named("y", types.A), - ), - types.Named("result", types.B).Description("true if `x` is not equal to `y`; false otherwise"), - ), -} +var NotEqual = v1.NotEqual // Equal represents the "==" comparison operator. -var Equal = &Builtin{ - Name: "equal", - Infix: "==", - Categories: comparison, - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - types.Named("y", types.A), - ), - types.Named("result", types.B).Description("true if `x` is equal to `y`; false otherwise"), - ), -} +var Equal = v1.Equal -/** - * Arithmetic - */ -var number = category("numbers") - -var Plus = &Builtin{ - Name: "plus", - Infix: "+", - Description: "Plus adds two numbers together.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - types.Named("y", types.N), - ), - types.Named("z", types.N).Description("the sum of `x` and `y`"), - ), - Categories: number, -} +var Plus = v1.Plus -var Minus = &Builtin{ - Name: "minus", - Infix: "-", - Description: "Minus subtracts the second number from the first number or computes the difference between two sets.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewAny(types.N, types.NewSet(types.A))), - types.Named("y", types.NewAny(types.N, types.NewSet(types.A))), - ), - types.Named("z", types.NewAny(types.N, types.NewSet(types.A))).Description("the difference of `x` and `y`"), - ), - Categories: category("sets", "numbers"), -} +var Minus = v1.Minus -var Multiply = &Builtin{ - Name: "mul", - Infix: "*", - Description: "Multiplies two numbers.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - types.Named("y", types.N), - ), - types.Named("z", types.N).Description("the product of `x` and `y`"), - ), - Categories: number, -} +var Multiply = v1.Multiply -var Divide = &Builtin{ - Name: "div", - Infix: "/", - Description: "Divides the first number by the second number.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N).Description("the dividend"), - types.Named("y", types.N).Description("the divisor"), - ), - types.Named("z", types.N).Description("the result of `x` divided by `y`"), - ), - Categories: number, -} +var Divide = v1.Divide -var Round = &Builtin{ - Name: "round", - Description: "Rounds the number to the nearest integer.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N).Description("the number to round"), - ), - types.Named("y", types.N).Description("the result of rounding `x`"), - ), - Categories: number, -} +var Round = v1.Round -var Ceil = &Builtin{ - Name: "ceil", - Description: "Rounds the number _up_ to the nearest integer.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N).Description("the number to round"), - ), - types.Named("y", 
types.N).Description("the result of rounding `x` _up_"), - ), - Categories: number, -} +var Ceil = v1.Ceil -var Floor = &Builtin{ - Name: "floor", - Description: "Rounds the number _down_ to the nearest integer.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N).Description("the number to round"), - ), - types.Named("y", types.N).Description("the result of rounding `x` _down_"), - ), - Categories: number, -} +var Floor = v1.Floor -var Abs = &Builtin{ - Name: "abs", - Description: "Returns the number without its sign.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - ), - types.Named("y", types.N).Description("the absolute value of `x`"), - ), - Categories: number, -} +var Abs = v1.Abs -var Rem = &Builtin{ - Name: "rem", - Infix: "%", - Description: "Returns the remainder for of `x` divided by `y`, for `y != 0`.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - types.Named("y", types.N), - ), - types.Named("z", types.N).Description("the remainder"), - ), - Categories: number, -} +var Rem = v1.Rem /** * Bitwise */ -var BitsOr = &Builtin{ - Name: "bits.or", - Description: "Returns the bitwise \"OR\" of two integers.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - types.Named("y", types.N), - ), - types.Named("z", types.N), - ), -} +var BitsOr = v1.BitsOr -var BitsAnd = &Builtin{ - Name: "bits.and", - Description: "Returns the bitwise \"AND\" of two integers.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - types.Named("y", types.N), - ), - types.Named("z", types.N), - ), -} +var BitsAnd = v1.BitsAnd -var BitsNegate = &Builtin{ - Name: "bits.negate", - Description: "Returns the bitwise negation (flip) of an integer.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - ), - types.Named("z", types.N), - ), -} +var BitsNegate = v1.BitsNegate -var BitsXOr = &Builtin{ - Name: "bits.xor", - Description: "Returns the bitwise \"XOR\" (exclusive-or) of two integers.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - types.Named("y", types.N), - ), - types.Named("z", types.N), - ), -} +var BitsXOr = v1.BitsXOr -var BitsShiftLeft = &Builtin{ - Name: "bits.lsh", - Description: "Returns a new integer with its bits shifted `s` bits to the left.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - types.Named("s", types.N), - ), - types.Named("z", types.N), - ), -} +var BitsShiftLeft = v1.BitsShiftLeft -var BitsShiftRight = &Builtin{ - Name: "bits.rsh", - Description: "Returns a new integer with its bits shifted `s` bits to the right.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - types.Named("s", types.N), - ), - types.Named("z", types.N), - ), -} +var BitsShiftRight = v1.BitsShiftRight /** * Sets */ -var sets = category("sets") - -var And = &Builtin{ - Name: "and", - Infix: "&", - Description: "Returns the intersection of two sets.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewSet(types.A)), - types.Named("y", types.NewSet(types.A)), - ), - types.Named("z", types.NewSet(types.A)).Description("the intersection of `x` and `y`"), - ), - Categories: sets, -} +var And = v1.And // Or performs a union operation on sets. 
-var Or = &Builtin{ - Name: "or", - Infix: "|", - Description: "Returns the union of two sets.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewSet(types.A)), - types.Named("y", types.NewSet(types.A)), - ), - types.Named("z", types.NewSet(types.A)).Description("the union of `x` and `y`"), - ), - Categories: sets, -} +var Or = v1.Or -var Intersection = &Builtin{ - Name: "intersection", - Description: "Returns the intersection of the given input sets.", - Decl: types.NewFunction( - types.Args( - types.Named("xs", types.NewSet(types.NewSet(types.A))).Description("set of sets to intersect"), - ), - types.Named("y", types.NewSet(types.A)).Description("the intersection of all `xs` sets"), - ), - Categories: sets, -} +var Intersection = v1.Intersection -var Union = &Builtin{ - Name: "union", - Description: "Returns the union of the given input sets.", - Decl: types.NewFunction( - types.Args( - types.Named("xs", types.NewSet(types.NewSet(types.A))).Description("set of sets to merge"), - ), - types.Named("y", types.NewSet(types.A)).Description("the union of all `xs` sets"), - ), - Categories: sets, -} +var Union = v1.Union /** * Aggregates */ -var aggregates = category("aggregates") - -var Count = &Builtin{ - Name: "count", - Description: " Count takes a collection or string and returns the number of elements (or characters) in it.", - Decl: types.NewFunction( - types.Args( - types.Named("collection", types.NewAny( - types.NewSet(types.A), - types.NewArray(nil, types.A), - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - types.S, - )).Description("the set/array/object/string to be counted"), - ), - types.Named("n", types.N).Description("the count of elements, key/val pairs, or characters, respectively."), - ), - Categories: aggregates, -} +var Count = v1.Count -var Sum = &Builtin{ - Name: "sum", - Description: "Sums elements of an array or set of numbers.", - Decl: types.NewFunction( - types.Args( - types.Named("collection", types.NewAny( - types.NewSet(types.N), - types.NewArray(nil, types.N), - )), - ), - types.Named("n", types.N).Description("the sum of all elements"), - ), - Categories: aggregates, -} +var Sum = v1.Sum -var Product = &Builtin{ - Name: "product", - Description: "Muliplies elements of an array or set of numbers", - Decl: types.NewFunction( - types.Args( - types.Named("collection", types.NewAny( - types.NewSet(types.N), - types.NewArray(nil, types.N), - )), - ), - types.Named("n", types.N).Description("the product of all elements"), - ), - Categories: aggregates, -} +var Product = v1.Product -var Max = &Builtin{ - Name: "max", - Description: "Returns the maximum value in a collection.", - Decl: types.NewFunction( - types.Args( - types.Named("collection", types.NewAny( - types.NewSet(types.A), - types.NewArray(nil, types.A), - )), - ), - types.Named("n", types.A).Description("the maximum of all elements"), - ), - Categories: aggregates, -} +var Max = v1.Max -var Min = &Builtin{ - Name: "min", - Description: "Returns the minimum value in a collection.", - Decl: types.NewFunction( - types.Args( - types.Named("collection", types.NewAny( - types.NewSet(types.A), - types.NewArray(nil, types.A), - )), - ), - types.Named("n", types.A).Description("the minimum of all elements"), - ), - Categories: aggregates, -} +var Min = v1.Min /** * Sorting */ -var Sort = &Builtin{ - Name: "sort", - Description: "Returns a sorted array.", - Decl: types.NewFunction( - types.Args( - types.Named("collection", types.NewAny( - types.NewArray(nil, types.A), - 
types.NewSet(types.A), - )).Description("the array or set to be sorted"), - ), - types.Named("n", types.NewArray(nil, types.A)).Description("the sorted array"), - ), - Categories: aggregates, -} +var Sort = v1.Sort /** * Arrays */ -var ArrayConcat = &Builtin{ - Name: "array.concat", - Description: "Concatenates two arrays.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewArray(nil, types.A)), - types.Named("y", types.NewArray(nil, types.A)), - ), - types.Named("z", types.NewArray(nil, types.A)).Description("the concatenation of `x` and `y`"), - ), -} +var ArrayConcat = v1.ArrayConcat -var ArraySlice = &Builtin{ - Name: "array.slice", - Description: "Returns a slice of a given array. If `start` is greater or equal than `stop`, `slice` is `[]`.", - Decl: types.NewFunction( - types.Args( - types.Named("arr", types.NewArray(nil, types.A)).Description("the array to be sliced"), - types.Named("start", types.NewNumber()).Description("the start index of the returned slice; if less than zero, it's clamped to 0"), - types.Named("stop", types.NewNumber()).Description("the stop index of the returned slice; if larger than `count(arr)`, it's clamped to `count(arr)`"), - ), - types.Named("slice", types.NewArray(nil, types.A)).Description("the subslice of `array`, from `start` to `end`, including `arr[start]`, but excluding `arr[end]`"), - ), -} // NOTE(sr): this function really needs examples - -var ArrayReverse = &Builtin{ - Name: "array.reverse", - Description: "Returns the reverse of a given array.", - Decl: types.NewFunction( - types.Args( - types.Named("arr", types.NewArray(nil, types.A)).Description("the array to be reversed"), - ), - types.Named("rev", types.NewArray(nil, types.A)).Description("an array containing the elements of `arr` in reverse order"), - ), -} +var ArraySlice = v1.ArraySlice + +var ArrayReverse = v1.ArrayReverse /** * Conversions */ -var conversions = category("conversions") - -var ToNumber = &Builtin{ - Name: "to_number", - Description: "Converts a string, bool, or number value to a number: Strings are converted to numbers using `strconv.Atoi`, Boolean `false` is converted to 0 and `true` is converted to 1.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewAny( - types.N, - types.S, - types.B, - types.NewNull(), - )), - ), - types.Named("num", types.N), - ), - Categories: conversions, -} + +var ToNumber = v1.ToNumber /** * Regular Expressions */ -var RegexMatch = &Builtin{ - Name: "regex.match", - Description: "Matches a string against a regular expression.", - Decl: types.NewFunction( - types.Args( - types.Named("pattern", types.S).Description("regular expression"), - types.Named("value", types.S).Description("value to match against `pattern`"), - ), - types.Named("result", types.B), - ), -} +var RegexMatch = v1.RegexMatch -var RegexIsValid = &Builtin{ - Name: "regex.is_valid", - Description: "Checks if a string is a valid regular expression: the detailed syntax for patterns is defined by https://github.com/google/re2/wiki/Syntax.", - Decl: types.NewFunction( - types.Args( - types.Named("pattern", types.S).Description("regular expression"), - ), - types.Named("result", types.B), - ), -} +var RegexIsValid = v1.RegexIsValid -var RegexFindAllStringSubmatch = &Builtin{ - Name: "regex.find_all_string_submatch_n", - Description: "Returns all successive matches of the expression.", - Decl: types.NewFunction( - types.Args( - types.Named("pattern", types.S).Description("regular expression"), - types.Named("value", types.S).Description("string 
to match"), - types.Named("number", types.N).Description("number of matches to return; `-1` means all matches"), - ), - types.Named("output", types.NewArray(nil, types.NewArray(nil, types.S))), - ), -} +var RegexFindAllStringSubmatch = v1.RegexFindAllStringSubmatch -var RegexTemplateMatch = &Builtin{ - Name: "regex.template_match", - Description: "Matches a string against a pattern, where there pattern may be glob-like", - Decl: types.NewFunction( - types.Args( - types.Named("template", types.S).Description("template expression containing `0..n` regular expressions"), - types.Named("value", types.S).Description("string to match"), - types.Named("delimiter_start", types.S).Description("start delimiter of the regular expression in `template`"), - types.Named("delimiter_end", types.S).Description("end delimiter of the regular expression in `template`"), - ), - types.Named("result", types.B), - ), -} // TODO(sr): example:`regex.template_match("urn:foo:{.*}", "urn:foo:bar:baz", "{", "}")`` returns ``true``. - -var RegexSplit = &Builtin{ - Name: "regex.split", - Description: "Splits the input string by the occurrences of the given pattern.", - Decl: types.NewFunction( - types.Args( - types.Named("pattern", types.S).Description("regular expression"), - types.Named("value", types.S).Description("string to match"), - ), - types.Named("output", types.NewArray(nil, types.S)).Description("the parts obtained by splitting `value`"), - ), -} +var RegexTemplateMatch = v1.RegexTemplateMatch + +var RegexSplit = v1.RegexSplit // RegexFind takes two strings and a number, the pattern, the value and number of match values to // return, -1 means all match values. -var RegexFind = &Builtin{ - Name: "regex.find_n", - Description: "Returns the specified number of matches when matching the input against the pattern.", - Decl: types.NewFunction( - types.Args( - types.Named("pattern", types.S).Description("regular expression"), - types.Named("value", types.S).Description("string to match"), - types.Named("number", types.N).Description("number of matches to return, if `-1`, returns all matches"), - ), - types.Named("output", types.NewArray(nil, types.S)).Description("collected matches"), - ), -} +var RegexFind = v1.RegexFind // GlobsMatch takes two strings regexp-style strings and evaluates to true if their // intersection matches a non-empty set of non-empty strings. // Examples: // - "a.a." and ".b.b" -> true. // - "[a-z]*" and [0-9]+" -> not true. -var GlobsMatch = &Builtin{ - Name: "regex.globs_match", - Description: `Checks if the intersection of two glob-style regular expressions matches a non-empty set of non-empty strings. 
-The set of regex symbols is limited for this builtin: only ` + "`.`, `*`, `+`, `[`, `-`, `]` and `\\` are treated as special symbols.", - Decl: types.NewFunction( - types.Args( - types.Named("glob1", types.S), - types.Named("glob2", types.S), - ), - types.Named("result", types.B), - ), -} +var GlobsMatch = v1.GlobsMatch /** * Strings */ -var stringsCat = category("strings") - -var AnyPrefixMatch = &Builtin{ - Name: "strings.any_prefix_match", - Description: "Returns true if any of the search strings begins with any of the base strings.", - Decl: types.NewFunction( - types.Args( - types.Named("search", types.NewAny( - types.S, - types.NewSet(types.S), - types.NewArray(nil, types.S), - )).Description("search string(s)"), - types.Named("base", types.NewAny( - types.S, - types.NewSet(types.S), - types.NewArray(nil, types.S), - )).Description("base string(s)"), - ), - types.Named("result", types.B).Description("result of the prefix check"), - ), - Categories: stringsCat, -} -var AnySuffixMatch = &Builtin{ - Name: "strings.any_suffix_match", - Description: "Returns true if any of the search strings ends with any of the base strings.", - Decl: types.NewFunction( - types.Args( - types.Named("search", types.NewAny( - types.S, - types.NewSet(types.S), - types.NewArray(nil, types.S), - )).Description("search string(s)"), - types.Named("base", types.NewAny( - types.S, - types.NewSet(types.S), - types.NewArray(nil, types.S), - )).Description("base string(s)"), - ), - types.Named("result", types.B).Description("result of the suffix check"), - ), - Categories: stringsCat, -} +var AnyPrefixMatch = v1.AnyPrefixMatch -var Concat = &Builtin{ - Name: "concat", - Description: "Joins a set or array of strings with a delimiter.", - Decl: types.NewFunction( - types.Args( - types.Named("delimiter", types.S), - types.Named("collection", types.NewAny( - types.NewSet(types.S), - types.NewArray(nil, types.S), - )).Description("strings to join"), - ), - types.Named("output", types.S), - ), - Categories: stringsCat, -} +var AnySuffixMatch = v1.AnySuffixMatch -var FormatInt = &Builtin{ - Name: "format_int", - Description: "Returns the string representation of the number in the given base after rounding it down to an integer value.", - Decl: types.NewFunction( - types.Args( - types.Named("number", types.N).Description("number to format"), - types.Named("base", types.N).Description("base of number representation to use"), - ), - types.Named("output", types.S).Description("formatted number"), - ), - Categories: stringsCat, -} +var Concat = v1.Concat -var IndexOf = &Builtin{ - Name: "indexof", - Description: "Returns the index of a substring contained inside a string.", - Decl: types.NewFunction( - types.Args( - types.Named("haystack", types.S).Description("string to search in"), - types.Named("needle", types.S).Description("substring to look for"), - ), - types.Named("output", types.N).Description("index of first occurrence, `-1` if not found"), - ), - Categories: stringsCat, -} +var FormatInt = v1.FormatInt -var IndexOfN = &Builtin{ - Name: "indexof_n", - Description: "Returns a list of all the indexes of a substring contained inside a string.", - Decl: types.NewFunction( - types.Args( - types.Named("haystack", types.S).Description("string to search in"), - types.Named("needle", types.S).Description("substring to look for"), - ), - types.Named("output", types.NewArray(nil, types.N)).Description("all indices at which `needle` occurs in `haystack`, may be empty"), - ), - Categories: stringsCat, -} +var IndexOf = v1.IndexOf 
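+// Because the declarations above are plain variable aliases, the v0 and v1
+// import paths share the very same *Builtin values; a sketch of the invariant
+// this patch establishes:
+//
+//	import (
+//		astv0 "github.com/open-policy-agent/opa/ast"
+//		astv1 "github.com/open-policy-agent/opa/v1/ast"
+//	)
+//
+//	// astv0.IndexOf == astv1.IndexOf evaluates to true: one definition,
+//	// reachable from two import paths, so capability checks stay consistent.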
-var Substring = &Builtin{ - Name: "substring", - Description: "Returns the portion of a string for a given `offset` and a `length`. If `length < 0`, `output` is the remainder of the string.", - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S), - types.Named("offset", types.N).Description("offset, must be positive"), - types.Named("length", types.N).Description("length of the substring starting from `offset`"), - ), - types.Named("output", types.S).Description("substring of `value` from `offset`, of length `length`"), - ), - Categories: stringsCat, -} +var IndexOfN = v1.IndexOfN -var Contains = &Builtin{ - Name: "contains", - Description: "Returns `true` if the search string is included in the base string", - Decl: types.NewFunction( - types.Args( - types.Named("haystack", types.S).Description("string to search in"), - types.Named("needle", types.S).Description("substring to look for"), - ), - types.Named("result", types.B).Description("result of the containment check"), - ), - Categories: stringsCat, -} +var Substring = v1.Substring -var StringCount = &Builtin{ - Name: "strings.count", - Description: "Returns the number of non-overlapping instances of a substring in a string.", - Decl: types.NewFunction( - types.Args( - types.Named("search", types.S).Description("string to search in"), - types.Named("substring", types.S).Description("substring to look for"), - ), - types.Named("output", types.N).Description("count of occurrences, `0` if not found"), - ), - Categories: stringsCat, -} +var Contains = v1.Contains -var StartsWith = &Builtin{ - Name: "startswith", - Description: "Returns true if the search string begins with the base string.", - Decl: types.NewFunction( - types.Args( - types.Named("search", types.S).Description("search string"), - types.Named("base", types.S).Description("base string"), - ), - types.Named("result", types.B).Description("result of the prefix check"), - ), - Categories: stringsCat, -} +var StringCount = v1.StringCount -var EndsWith = &Builtin{ - Name: "endswith", - Description: "Returns true if the search string ends with the base string.", - Decl: types.NewFunction( - types.Args( - types.Named("search", types.S).Description("search string"), - types.Named("base", types.S).Description("base string"), - ), - types.Named("result", types.B).Description("result of the suffix check"), - ), - Categories: stringsCat, -} +var StartsWith = v1.StartsWith -var Lower = &Builtin{ - Name: "lower", - Description: "Returns the input string but with all characters in lower-case.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("string that is converted to lower-case"), - ), - types.Named("y", types.S).Description("lower-case of x"), - ), - Categories: stringsCat, -} +var EndsWith = v1.EndsWith -var Upper = &Builtin{ - Name: "upper", - Description: "Returns the input string but with all characters in upper-case.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("string that is converted to upper-case"), - ), - types.Named("y", types.S).Description("upper-case of x"), - ), - Categories: stringsCat, -} +var Lower = v1.Lower -var Split = &Builtin{ - Name: "split", - Description: "Split returns an array containing elements of the input string split on a delimiter.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("string that is split"), - types.Named("delimiter", types.S).Description("delimiter used for splitting"), - ), - types.Named("ys", types.NewArray(nil, 
types.S)).Description("split parts"), - ), - Categories: stringsCat, -} +var Upper = v1.Upper -var Replace = &Builtin{ - Name: "replace", - Description: "Replace replaces all instances of a sub-string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("string being processed"), - types.Named("old", types.S).Description("substring to replace"), - types.Named("new", types.S).Description("string to replace `old` with"), - ), - types.Named("y", types.S).Description("string with replaced substrings"), - ), - Categories: stringsCat, -} +var Split = v1.Split -var ReplaceN = &Builtin{ - Name: "strings.replace_n", - Description: `Replaces a string from a list of old, new string pairs. -Replacements are performed in the order they appear in the target string, without overlapping matches. -The old string comparisons are done in argument order.`, - Decl: types.NewFunction( - types.Args( - types.Named("patterns", types.NewObject( - nil, - types.NewDynamicProperty( - types.S, - types.S)), - ).Description("replacement pairs"), - types.Named("value", types.S).Description("string to replace substring matches in"), - ), - types.Named("output", types.S), - ), -} +var Replace = v1.Replace -var RegexReplace = &Builtin{ - Name: "regex.replace", - Description: `Find and replaces the text using the regular expression pattern.`, - Decl: types.NewFunction( - types.Args( - types.Named("s", types.S).Description("string being processed"), - types.Named("pattern", types.S).Description("regex pattern to be applied"), - types.Named("value", types.S).Description("regex value"), - ), - types.Named("output", types.S), - ), -} +var ReplaceN = v1.ReplaceN -var Trim = &Builtin{ - Name: "trim", - Description: "Returns `value` with all leading or trailing instances of the `cutset` characters removed.", - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S).Description("string to trim"), - types.Named("cutset", types.S).Description("string of characters that are cut off"), - ), - types.Named("output", types.S).Description("string trimmed of `cutset` characters"), - ), - Categories: stringsCat, -} +var RegexReplace = v1.RegexReplace -var TrimLeft = &Builtin{ - Name: "trim_left", - Description: "Returns `value` with all leading instances of the `cutset` characters removed.", - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S).Description("string to trim"), - types.Named("cutset", types.S).Description("string of characters that are cut off on the left"), - ), - types.Named("output", types.S).Description("string left-trimmed of `cutset` characters"), - ), - Categories: stringsCat, -} +var Trim = v1.Trim -var TrimPrefix = &Builtin{ - Name: "trim_prefix", - Description: "Returns `value` without the prefix. 
If `value` doesn't start with `prefix`, it is returned unchanged.", - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S).Description("string to trim"), - types.Named("prefix", types.S).Description("prefix to cut off"), - ), - types.Named("output", types.S).Description("string with `prefix` cut off"), - ), - Categories: stringsCat, -} +var TrimLeft = v1.TrimLeft -var TrimRight = &Builtin{ - Name: "trim_right", - Description: "Returns `value` with all trailing instances of the `cutset` characters removed.", - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S).Description("string to trim"), - types.Named("cutset", types.S).Description("string of characters that are cut off on the right"), - ), - types.Named("output", types.S).Description("string right-trimmed of `cutset` characters"), - ), - Categories: stringsCat, -} +var TrimPrefix = v1.TrimPrefix -var TrimSuffix = &Builtin{ - Name: "trim_suffix", - Description: "Returns `value` without the suffix. If `value` doesn't end with `suffix`, it is returned unchanged.", - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S).Description("string to trim"), - types.Named("suffix", types.S).Description("suffix to cut off"), - ), - types.Named("output", types.S).Description("string with `suffix` cut off"), - ), - Categories: stringsCat, -} +var TrimRight = v1.TrimRight -var TrimSpace = &Builtin{ - Name: "trim_space", - Description: "Return the given string with all leading and trailing white space removed.", - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S).Description("string to trim"), - ), - types.Named("output", types.S).Description("string leading and trailing white space cut off"), - ), - Categories: stringsCat, -} +var TrimSuffix = v1.TrimSuffix -var Sprintf = &Builtin{ - Name: "sprintf", - Description: "Returns the given string, formatted.", - Decl: types.NewFunction( - types.Args( - types.Named("format", types.S).Description("string with formatting verbs"), - types.Named("values", types.NewArray(nil, types.A)).Description("arguments to format into formatting verbs"), - ), - types.Named("output", types.S).Description("`format` formatted by the values in `values`"), - ), - Categories: stringsCat, -} +var TrimSpace = v1.TrimSpace -var StringReverse = &Builtin{ - Name: "strings.reverse", - Description: "Reverses a given string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S), - ), - Categories: stringsCat, -} +var Sprintf = v1.Sprintf -var RenderTemplate = &Builtin{ - Name: "strings.render_template", - Description: `Renders a templated string with given template variables injected. For a given templated string and key/value mapping, values will be injected into the template where they are referenced by key. 
- For examples of templating syntax, see https://pkg.go.dev/text/template`, - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S).Description("a templated string"), - types.Named("vars", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("a mapping of template variable keys to values"), - ), - types.Named("result", types.S).Description("rendered template with template variables injected"), - ), - Categories: stringsCat, -} +var StringReverse = v1.StringReverse + +var RenderTemplate = v1.RenderTemplate /** * Numbers @@ -1354,82 +236,19 @@ var RenderTemplate = &Builtin{ // RandIntn returns a random number 0 - n // Marked non-deterministic because it relies on RNG internally. -var RandIntn = &Builtin{ - Name: "rand.intn", - Description: "Returns a random integer between `0` and `n` (`n` exclusive). If `n` is `0`, then `y` is always `0`. For any given argument pair (`str`, `n`), the output will be consistent throughout a query evaluation.", - Decl: types.NewFunction( - types.Args( - types.Named("str", types.S), - types.Named("n", types.N), - ), - types.Named("y", types.N).Description("random integer in the range `[0, abs(n))`"), - ), - Categories: number, - Nondeterministic: true, -} +var RandIntn = v1.RandIntn -var NumbersRange = &Builtin{ - Name: "numbers.range", - Description: "Returns an array of numbers in the given (inclusive) range. If `a==b`, then `range == [a]`; if `a > b`, then `range` is in descending order.", - Decl: types.NewFunction( - types.Args( - types.Named("a", types.N), - types.Named("b", types.N), - ), - types.Named("range", types.NewArray(nil, types.N)).Description("the range between `a` and `b`"), - ), -} +var NumbersRange = v1.NumbersRange -var NumbersRangeStep = &Builtin{ - Name: "numbers.range_step", - Description: `Returns an array of numbers in the given (inclusive) range incremented by a positive step. - If "a==b", then "range == [a]"; if "a > b", then "range" is in descending order. - If the provided "step" is less then 1, an error will be thrown. - If "b" is not in the range of the provided "step", "b" won't be included in the result. - `, - Decl: types.NewFunction( - types.Args( - types.Named("a", types.N), - types.Named("b", types.N), - types.Named("step", types.N), - ), - types.Named("range", types.NewArray(nil, types.N)).Description("the range between `a` and `b` in `step` increments"), - ), -} +var NumbersRangeStep = v1.NumbersRangeStep /** * Units */ -var UnitsParse = &Builtin{ - Name: "units.parse", - Description: `Converts strings like "10G", "5K", "4M", "1500m" and the like into a number. -This number can be a non-integer, such as 1.5, 0.22, etc. Supports standard metric decimal and -binary SI units (e.g., K, Ki, M, Mi, G, Gi etc.) m, K, M, G, T, P, and E are treated as decimal -units and Ki, Mi, Gi, Ti, Pi, and Ei are treated as binary units. - -Note that 'm' and 'M' are case-sensitive, to allow distinguishing between "milli" and "mega" units respectively. Other units are case-insensitive.`, - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("the unit to parse"), - ), - types.Named("y", types.N).Description("the parsed number"), - ), -} +var UnitsParse = v1.UnitsParse -var UnitsParseBytes = &Builtin{ - Name: "units.parse_bytes", - Description: `Converts strings like "10GB", "5K", "4mb" into an integer number of bytes. -Supports standard byte units (e.g., KB, KiB, etc.) KB, MB, GB, and TB are treated as decimal -units and KiB, MiB, GiB, and TiB are treated as binary units. 
The bytes symbol (b/B) in the -unit is optional and omitting it wil give the same result (e.g. Mi and MiB).`, - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("the byte unit to parse"), - ), - types.Named("y", types.N).Description("the parsed number"), - ), -} +var UnitsParseBytes = v1.UnitsParseBytes // /** @@ -1438,1372 +257,241 @@ unit is optional and omitting it wil give the same result (e.g. Mi and MiB).`, // UUIDRFC4122 returns a version 4 UUID string. // Marked non-deterministic because it relies on RNG internally. -var UUIDRFC4122 = &Builtin{ - Name: "uuid.rfc4122", - Description: "Returns a new UUIDv4.", - Decl: types.NewFunction( - types.Args( - types.Named("k", types.S), - ), - types.Named("output", types.S).Description("a version 4 UUID; for any given `k`, the output will be consistent throughout a query evaluation"), - ), - Nondeterministic: true, -} +var UUIDRFC4122 = v1.UUIDRFC4122 -var UUIDParse = &Builtin{ - Name: "uuid.parse", - Description: "Parses the string value as an UUID and returns an object with the well-defined fields of the UUID if valid.", - Categories: nil, - Decl: types.NewFunction( - types.Args( - types.Named("uuid", types.S), - ), - types.Named("result", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("Properties of UUID if valid (version, variant, etc). Undefined otherwise."), - ), - Relation: false, -} +var UUIDParse = v1.UUIDParse /** * JSON */ -var objectCat = category("object") - -var JSONFilter = &Builtin{ - Name: "json.filter", - Description: "Filters the object. " + - "For example: `json.filter({\"a\": {\"b\": \"x\", \"c\": \"y\"}}, [\"a/b\"])` will result in `{\"a\": {\"b\": \"x\"}}`). " + - "Paths are not filtered in-order and are deduplicated before being evaluated.", - Decl: types.NewFunction( - types.Args( - types.Named("object", types.NewObject( - nil, - types.NewDynamicProperty(types.A, types.A), - )), - types.Named("paths", types.NewAny( - types.NewArray( - nil, - types.NewAny( - types.S, - types.NewArray( - nil, - types.A, - ), - ), - ), - types.NewSet( - types.NewAny( - types.S, - types.NewArray( - nil, - types.A, - ), - ), - ), - )).Description("JSON string paths"), - ), - types.Named("filtered", types.A).Description("remaining data from `object` with only keys specified in `paths`"), - ), - Categories: objectCat, -} +var JSONFilter = v1.JSONFilter -var JSONRemove = &Builtin{ - Name: "json.remove", - Description: "Removes paths from an object. " + - "For example: `json.remove({\"a\": {\"b\": \"x\", \"c\": \"y\"}}, [\"a/b\"])` will result in `{\"a\": {\"c\": \"y\"}}`. " + - "Paths are not removed in-order and are deduplicated before being evaluated.", - Decl: types.NewFunction( - types.Args( - types.Named("object", types.NewObject( - nil, - types.NewDynamicProperty(types.A, types.A), - )), - types.Named("paths", types.NewAny( - types.NewArray( - nil, - types.NewAny( - types.S, - types.NewArray( - nil, - types.A, - ), - ), - ), - types.NewSet( - types.NewAny( - types.S, - types.NewArray( - nil, - types.A, - ), - ), - ), - )).Description("JSON string paths"), - ), - types.Named("output", types.A).Description("result of removing all keys specified in `paths`"), - ), - Categories: objectCat, -} +var JSONRemove = v1.JSONRemove -var JSONPatch = &Builtin{ - Name: "json.patch", - Description: "Patches an object according to RFC6902. 
" + - "For example: `json.patch({\"a\": {\"foo\": 1}}, [{\"op\": \"add\", \"path\": \"/a/bar\", \"value\": 2}])` results in `{\"a\": {\"foo\": 1, \"bar\": 2}`. " + - "The patches are applied atomically: if any of them fails, the result will be undefined. " + - "Additionally works on sets, where a value contained in the set is considered to be its path.", - Decl: types.NewFunction( - types.Args( - types.Named("object", types.A), // TODO(sr): types.A? - types.Named("patches", types.NewArray( - nil, - types.NewObject( - []*types.StaticProperty{ - {Key: "op", Value: types.S}, - {Key: "path", Value: types.A}, - }, - types.NewDynamicProperty(types.A, types.A), - ), - )), - ), - types.Named("output", types.A).Description("result obtained after consecutively applying all patch operations in `patches`"), - ), - Categories: objectCat, -} +var JSONPatch = v1.JSONPatch -var ObjectSubset = &Builtin{ - Name: "object.subset", - Description: "Determines if an object `sub` is a subset of another object `super`." + - "Object `sub` is a subset of object `super` if and only if every key in `sub` is also in `super`, " + - "**and** for all keys which `sub` and `super` share, they have the same value. " + - "This function works with objects, sets, arrays and a set of array and set." + - "If both arguments are objects, then the operation is recursive, e.g. " + - "`{\"c\": {\"x\": {10, 15, 20}}` is a subset of `{\"a\": \"b\", \"c\": {\"x\": {10, 15, 20, 25}, \"y\": \"z\"}`. " + - "If both arguments are sets, then this function checks if every element of `sub` is a member of `super`, " + - "but does not attempt to recurse. If both arguments are arrays, " + - "then this function checks if `sub` appears contiguously in order within `super`, " + - "and also does not attempt to recurse. If `super` is array and `sub` is set, " + - "then this function checks if `super` contains every element of `sub` with no consideration of ordering, " + - "and also does not attempt to recurse.", - Decl: types.NewFunction( - types.Args( - types.Named("super", types.NewAny(types.NewObject( - nil, - types.NewDynamicProperty(types.A, types.A), - ), - types.NewSet(types.A), - types.NewArray(nil, types.A), - )).Description("object to test if sub is a subset of"), - types.Named("sub", types.NewAny(types.NewObject( - nil, - types.NewDynamicProperty(types.A, types.A), - ), - types.NewSet(types.A), - types.NewArray(nil, types.A), - )).Description("object to test if super is a superset of"), - ), - types.Named("result", types.A).Description("`true` if `sub` is a subset of `super`"), - ), -} +var ObjectSubset = v1.ObjectSubset -var ObjectUnion = &Builtin{ - Name: "object.union", - Description: "Creates a new object of the asymmetric union of two objects. " + - "For example: `object.union({\"a\": 1, \"b\": 2, \"c\": {\"d\": 3}}, {\"a\": 7, \"c\": {\"d\": 4, \"e\": 5}})` will result in `{\"a\": 7, \"b\": 2, \"c\": {\"d\": 4, \"e\": 5}}`.", - Decl: types.NewFunction( - types.Args( - types.Named("a", types.NewObject( - nil, - types.NewDynamicProperty(types.A, types.A), - )), - types.Named("b", types.NewObject( - nil, - types.NewDynamicProperty(types.A, types.A), - )), - ), - types.Named("output", types.A).Description("a new object which is the result of an asymmetric recursive union of two objects where conflicts are resolved by choosing the key from the right-hand object `b`"), - ), // TODO(sr): types.A? 
^^^^^^^ (also below) -} +var ObjectUnion = v1.ObjectUnion -var ObjectUnionN = &Builtin{ - Name: "object.union_n", - Description: "Creates a new object that is the asymmetric union of all objects merged from left to right. " + - "For example: `object.union_n([{\"a\": 1}, {\"b\": 2}, {\"a\": 3}])` will result in `{\"b\": 2, \"a\": 3}`.", - Decl: types.NewFunction( - types.Args( - types.Named("objects", types.NewArray( - nil, - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - )), - ), - types.Named("output", types.A).Description("asymmetric recursive union of all objects in `objects`, merged from left to right, where conflicts are resolved by choosing the key from the right-hand object"), - ), -} +var ObjectUnionN = v1.ObjectUnionN -var ObjectRemove = &Builtin{ - Name: "object.remove", - Description: "Removes specified keys from an object.", - Decl: types.NewFunction( - types.Args( - types.Named("object", types.NewObject( - nil, - types.NewDynamicProperty(types.A, types.A), - )).Description("object to remove keys from"), - types.Named("keys", types.NewAny( - types.NewArray(nil, types.A), - types.NewSet(types.A), - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - )).Description("keys to remove from x"), - ), - types.Named("output", types.A).Description("result of removing the specified `keys` from `object`"), - ), -} +var ObjectRemove = v1.ObjectRemove -var ObjectFilter = &Builtin{ - Name: "object.filter", - Description: "Filters the object by keeping only specified keys. " + - "For example: `object.filter({\"a\": {\"b\": \"x\", \"c\": \"y\"}, \"d\": \"z\"}, [\"a\"])` will result in `{\"a\": {\"b\": \"x\", \"c\": \"y\"}}`).", - Decl: types.NewFunction( - types.Args( - types.Named("object", types.NewObject( - nil, - types.NewDynamicProperty(types.A, types.A), - )).Description("object to filter keys"), - types.Named("keys", types.NewAny( - types.NewArray(nil, types.A), - types.NewSet(types.A), - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - )), - ), - types.Named("filtered", types.A).Description("remaining data from `object` with only keys specified in `keys`"), - ), -} +var ObjectFilter = v1.ObjectFilter -var ObjectGet = &Builtin{ - Name: "object.get", - Description: "Returns value of an object's key if present, otherwise a default. " + - "If the supplied `key` is an `array`, then `object.get` will search through a nested object or array using each key in turn. " + - "For example: `object.get({\"a\": [{ \"b\": true }]}, [\"a\", 0, \"b\"], false)` results in `true`.", - Decl: types.NewFunction( - types.Args( - types.Named("object", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("object to get `key` from"), - types.Named("key", types.A).Description("key to lookup in `object`"), - types.Named("default", types.A).Description("default to use when lookup fails"), - ), - types.Named("value", types.A).Description("`object[key]` if present, otherwise `default`"), - ), -} +var ObjectGet = v1.ObjectGet -var ObjectKeys = &Builtin{ - Name: "object.keys", - Description: "Returns a set of an object's keys. 
" + - "For example: `object.keys({\"a\": 1, \"b\": true, \"c\": \"d\")` results in `{\"a\", \"b\", \"c\"}`.", - Decl: types.NewFunction( - types.Args( - types.Named("object", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("object to get keys from"), - ), - types.Named("value", types.NewSet(types.A)).Description("set of `object`'s keys"), - ), -} +var ObjectKeys = v1.ObjectKeys /* * Encoding */ -var encoding = category("encoding") - -var JSONMarshal = &Builtin{ - Name: "json.marshal", - Description: "Serializes the input term to JSON.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A).Description("the term to serialize"), - ), - types.Named("y", types.S).Description("the JSON string representation of `x`"), - ), - Categories: encoding, -} -var JSONMarshalWithOptions = &Builtin{ - Name: "json.marshal_with_options", - Description: "Serializes the input term JSON, with additional formatting options via the `opts` parameter. " + - "`opts` accepts keys `pretty` (enable multi-line/formatted JSON), `prefix` (string to prefix lines with, default empty string) and `indent` (string to indent with, default `\\t`).", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A).Description("the term to serialize"), - types.Named("opts", types.NewObject( - []*types.StaticProperty{ - types.NewStaticProperty("pretty", types.B), - types.NewStaticProperty("indent", types.S), - types.NewStaticProperty("prefix", types.S), - }, - types.NewDynamicProperty(types.S, types.A), - )).Description("encoding options"), - ), - types.Named("y", types.S).Description("the JSON string representation of `x`, with configured prefix/indent string(s) as appropriate"), - ), - Categories: encoding, -} +var JSONMarshal = v1.JSONMarshal -var JSONUnmarshal = &Builtin{ - Name: "json.unmarshal", - Description: "Deserializes the input string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("a JSON string"), - ), - types.Named("y", types.A).Description("the term deserialized from `x`"), - ), - Categories: encoding, -} +var JSONMarshalWithOptions = v1.JSONMarshalWithOptions -var JSONIsValid = &Builtin{ - Name: "json.is_valid", - Description: "Verifies the input string is a valid JSON document.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("a JSON string"), - ), - types.Named("result", types.B).Description("`true` if `x` is valid JSON, `false` otherwise"), - ), - Categories: encoding, -} +var JSONUnmarshal = v1.JSONUnmarshal -var Base64Encode = &Builtin{ - Name: "base64.encode", - Description: "Serializes the input string into base64 encoding.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("base64 serialization of `x`"), - ), - Categories: encoding, -} +var JSONIsValid = v1.JSONIsValid -var Base64Decode = &Builtin{ - Name: "base64.decode", - Description: "Deserializes the base64 encoded input string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("base64 deserialization of `x`"), - ), - Categories: encoding, -} +var Base64Encode = v1.Base64Encode -var Base64IsValid = &Builtin{ - Name: "base64.is_valid", - Description: "Verifies the input string is base64 encoded.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("result", types.B).Description("`true` if `x` is valid base64 encoded value, `false` otherwise"), - ), - Categories: 
encoding, -} +var Base64Decode = v1.Base64Decode -var Base64UrlEncode = &Builtin{ - Name: "base64url.encode", - Description: "Serializes the input string into base64url encoding.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("base64url serialization of `x`"), - ), - Categories: encoding, -} +var Base64IsValid = v1.Base64IsValid -var Base64UrlEncodeNoPad = &Builtin{ - Name: "base64url.encode_no_pad", - Description: "Serializes the input string into base64url encoding without padding.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("base64url serialization of `x`"), - ), - Categories: encoding, -} +var Base64UrlEncode = v1.Base64UrlEncode -var Base64UrlDecode = &Builtin{ - Name: "base64url.decode", - Description: "Deserializes the base64url encoded input string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("base64url deserialization of `x`"), - ), - Categories: encoding, -} +var Base64UrlEncodeNoPad = v1.Base64UrlEncodeNoPad -var URLQueryDecode = &Builtin{ - Name: "urlquery.decode", - Description: "Decodes a URL-encoded input string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("URL-encoding deserialization of `x`"), - ), - Categories: encoding, -} +var Base64UrlDecode = v1.Base64UrlDecode -var URLQueryEncode = &Builtin{ - Name: "urlquery.encode", - Description: "Encodes the input string into a URL-encoded string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("URL-encoding serialization of `x`"), - ), - Categories: encoding, -} +var URLQueryDecode = v1.URLQueryDecode -var URLQueryEncodeObject = &Builtin{ - Name: "urlquery.encode_object", - Description: "Encodes the given object into a URL encoded query string.", - Decl: types.NewFunction( - types.Args( - types.Named("object", types.NewObject( - nil, - types.NewDynamicProperty( - types.S, - types.NewAny( - types.S, - types.NewArray(nil, types.S), - types.NewSet(types.S)))))), - types.Named("y", types.S).Description("the URL-encoded serialization of `object`"), - ), - Categories: encoding, -} +var URLQueryEncode = v1.URLQueryEncode -var URLQueryDecodeObject = &Builtin{ - Name: "urlquery.decode_object", - Description: "Decodes the given URL query string into an object.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("the query string"), - ), - types.Named("object", types.NewObject(nil, types.NewDynamicProperty( - types.S, - types.NewArray(nil, types.S)))).Description("the resulting object"), - ), - Categories: encoding, -} +var URLQueryEncodeObject = v1.URLQueryEncodeObject -var YAMLMarshal = &Builtin{ - Name: "yaml.marshal", - Description: "Serializes the input term to YAML.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A).Description("the term to serialize"), - ), - types.Named("y", types.S).Description("the YAML string representation of `x`"), - ), - Categories: encoding, -} +var URLQueryDecodeObject = v1.URLQueryDecodeObject -var YAMLUnmarshal = &Builtin{ - Name: "yaml.unmarshal", - Description: "Deserializes the input string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("a YAML string"), - ), - types.Named("y", types.A).Description("the term deserialized from `x`"), - ), - Categories: encoding, -} 
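+// A short sketch exercising one of the aliased encoding builtins end to end
+// (assuming the v1 rego package vendored by this patch):
+//
+//	q := rego.New(rego.Query(`base64.decode(base64.encode("opa")) == "opa"`))
+//	rs, err := q.Eval(context.Background())
+//	// err == nil and len(rs) == 1 when the round-trip holds.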
+var YAMLMarshal = v1.YAMLMarshal + +var YAMLUnmarshal = v1.YAMLUnmarshal // YAMLIsValid verifies the input string is a valid YAML document. -var YAMLIsValid = &Builtin{ - Name: "yaml.is_valid", - Description: "Verifies the input string is a valid YAML document.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("a YAML string"), - ), - types.Named("result", types.B).Description("`true` if `x` is valid YAML, `false` otherwise"), - ), - Categories: encoding, -} +var YAMLIsValid = v1.YAMLIsValid -var HexEncode = &Builtin{ - Name: "hex.encode", - Description: "Serializes the input string using hex-encoding.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("serialization of `x` using hex-encoding"), - ), - Categories: encoding, -} +var HexEncode = v1.HexEncode -var HexDecode = &Builtin{ - Name: "hex.decode", - Description: "Deserializes the hex-encoded input string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("a hex-encoded string"), - ), - types.Named("y", types.S).Description("deserialized from `x`"), - ), - Categories: encoding, -} +var HexDecode = v1.HexDecode /** * Tokens */ -var tokensCat = category("tokens") - -var JWTDecode = &Builtin{ - Name: "io.jwt.decode", - Description: "Decodes a JSON Web Token and outputs it as an object.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token to decode"), - ), - types.Named("output", types.NewArray([]types.Type{ - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - types.S, - }, nil)).Description("`[header, payload, sig]`, where `header` and `payload` are objects; `sig` is the hexadecimal representation of the signature on the token."), - ), - Categories: tokensCat, -} -var JWTVerifyRS256 = &Builtin{ - Name: "io.jwt.verify_rs256", - Description: "Verifies if a RS256 JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTDecode = v1.JWTDecode -var JWTVerifyRS384 = &Builtin{ - Name: "io.jwt.verify_rs384", - Description: "Verifies if a RS384 JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyRS256 = v1.JWTVerifyRS256 -var JWTVerifyRS512 = &Builtin{ - Name: "io.jwt.verify_rs512", - Description: "Verifies if a RS512 JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature 
is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyRS384 = v1.JWTVerifyRS384 -var JWTVerifyPS256 = &Builtin{ - Name: "io.jwt.verify_ps256", - Description: "Verifies if a PS256 JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyRS512 = v1.JWTVerifyRS512 -var JWTVerifyPS384 = &Builtin{ - Name: "io.jwt.verify_ps384", - Description: "Verifies if a PS384 JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyPS256 = v1.JWTVerifyPS256 -var JWTVerifyPS512 = &Builtin{ - Name: "io.jwt.verify_ps512", - Description: "Verifies if a PS512 JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyPS384 = v1.JWTVerifyPS384 -var JWTVerifyES256 = &Builtin{ - Name: "io.jwt.verify_es256", - Description: "Verifies if a ES256 JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyPS512 = v1.JWTVerifyPS512 -var JWTVerifyES384 = &Builtin{ - Name: "io.jwt.verify_es384", - Description: "Verifies if a ES384 JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyES256 = v1.JWTVerifyES256 -var JWTVerifyES512 = &Builtin{ - Name: "io.jwt.verify_es512", - Description: "Verifies if a ES512 JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` 
otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyES384 = v1.JWTVerifyES384 -var JWTVerifyHS256 = &Builtin{ - Name: "io.jwt.verify_hs256", - Description: "Verifies if a HS256 (secret) JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("secret", types.S).Description("plain text secret used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyES512 = v1.JWTVerifyES512 -var JWTVerifyHS384 = &Builtin{ - Name: "io.jwt.verify_hs384", - Description: "Verifies if a HS384 (secret) JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("secret", types.S).Description("plain text secret used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyHS256 = v1.JWTVerifyHS256 -var JWTVerifyHS512 = &Builtin{ - Name: "io.jwt.verify_hs512", - Description: "Verifies if a HS512 (secret) JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("secret", types.S).Description("plain text secret used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyHS384 = v1.JWTVerifyHS384 -// Marked non-deterministic because it relies on time internally. -var JWTDecodeVerify = &Builtin{ - Name: "io.jwt.decode_verify", - Description: `Verifies a JWT signature under parameterized constraints and decodes the claims if it is valid. -Supports the following algorithms: HS256, HS384, HS512, RS256, RS384, RS512, ES256, ES384, ES512, PS256, PS384 and PS512.`, - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified and whose claims are to be checked"), - types.Named("constraints", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("claim verification constraints"), - ), - types.Named("output", types.NewArray([]types.Type{ - types.B, - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - }, nil)).Description("`[valid, header, payload]`: if the input token is verified and meets the requirements of `constraints` then `valid` is `true`; `header` and `payload` are objects containing the JOSE header and the JWT claim set; otherwise, `valid` is `false`, `header` and `payload` are `{}`"), - ), - Categories: tokensCat, - Nondeterministic: true, -} +var JWTVerifyHS512 = v1.JWTVerifyHS512 -var tokenSign = category("tokensign") +// Marked non-deterministic because it relies on time internally. +var JWTDecodeVerify = v1.JWTDecodeVerify // Marked non-deterministic because it relies on RNG internally. 
-var JWTEncodeSignRaw = &Builtin{ - Name: "io.jwt.encode_sign_raw", - Description: "Encodes and optionally signs a JSON Web Token.", - Decl: types.NewFunction( - types.Args( - types.Named("headers", types.S).Description("JWS Protected Header"), - types.Named("payload", types.S).Description("JWS Payload"), - types.Named("key", types.S).Description("JSON Web Key (RFC7517)"), - ), - types.Named("output", types.S).Description("signed JWT"), - ), - Categories: tokenSign, - Nondeterministic: true, -} +var JWTEncodeSignRaw = v1.JWTEncodeSignRaw // Marked non-deterministic because it relies on RNG internally. -var JWTEncodeSign = &Builtin{ - Name: "io.jwt.encode_sign", - Description: "Encodes and optionally signs a JSON Web Token. Inputs are taken as objects, not encoded strings (see `io.jwt.encode_sign_raw`).", - Decl: types.NewFunction( - types.Args( - types.Named("headers", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JWS Protected Header"), - types.Named("payload", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JWS Payload"), - types.Named("key", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JSON Web Key (RFC7517)"), - ), - types.Named("output", types.S).Description("signed JWT"), - ), - Categories: tokenSign, - Nondeterministic: true, -} +var JWTEncodeSign = v1.JWTEncodeSign /** * Time */ // Marked non-deterministic because it relies on time directly. -var NowNanos = &Builtin{ - Name: "time.now_ns", - Description: "Returns the current time since epoch in nanoseconds.", - Decl: types.NewFunction( - nil, - types.Named("now", types.N).Description("nanoseconds since epoch"), - ), - Nondeterministic: true, -} +var NowNanos = v1.NowNanos -var ParseNanos = &Builtin{ - Name: "time.parse_ns", - Description: "Returns the time in nanoseconds parsed from the string in the given format. `undefined` if the result would be outside the valid time range that can fit within an `int64`.", - Decl: types.NewFunction( - types.Args( - types.Named("layout", types.S).Description("format used for parsing, see the [Go `time` package documentation](https://golang.org/pkg/time/#Parse) for more details"), - types.Named("value", types.S).Description("input to parse according to `layout`"), - ), - types.Named("ns", types.N).Description("`value` in nanoseconds since epoch"), - ), -} +var ParseNanos = v1.ParseNanos -var ParseRFC3339Nanos = &Builtin{ - Name: "time.parse_rfc3339_ns", - Description: "Returns the time in nanoseconds parsed from the string in RFC3339 format. 
`undefined` if the result would be outside the valid time range that can fit within an `int64`.", - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S), - ), - types.Named("ns", types.N).Description("`value` in nanoseconds since epoch"), - ), -} +var ParseRFC3339Nanos = v1.ParseRFC3339Nanos -var ParseDurationNanos = &Builtin{ - Name: "time.parse_duration_ns", - Description: "Returns the duration in nanoseconds represented by a string.", - Decl: types.NewFunction( - types.Args( - types.Named("duration", types.S).Description("a duration like \"3m\"; see the [Go `time` package documentation](https://golang.org/pkg/time/#ParseDuration) for more details"), - ), - types.Named("ns", types.N).Description("the `duration` in nanoseconds"), - ), -} +var ParseDurationNanos = v1.ParseDurationNanos -var Format = &Builtin{ - Name: "time.format", - Description: "Returns the formatted timestamp for the nanoseconds since epoch.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewAny( - types.N, - types.NewArray([]types.Type{types.N, types.S}, nil), - types.NewArray([]types.Type{types.N, types.S, types.S}, nil), - )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string; or a three-element array of ns, timezone string and a layout string or golang defined formatting constant (see golang supported time formats)"), - ), - types.Named("formatted timestamp", types.S).Description("the formatted timestamp represented for the nanoseconds since the epoch in the supplied timezone (or UTC)"), - ), -} +var Format = v1.Format -var Date = &Builtin{ - Name: "time.date", - Description: "Returns the `[year, month, day]` for the nanoseconds since epoch.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewAny( - types.N, - types.NewArray([]types.Type{types.N, types.S}, nil), - )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string"), - ), - types.Named("date", types.NewArray([]types.Type{types.N, types.N, types.N}, nil)).Description("an array of `year`, `month` (1-12), and `day` (1-31)"), - ), -} +var Date = v1.Date -var Clock = &Builtin{ - Name: "time.clock", - Description: "Returns the `[hour, minute, second]` of the day for the nanoseconds since epoch.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewAny( - types.N, - types.NewArray([]types.Type{types.N, types.S}, nil), - )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string"), - ), - types.Named("output", types.NewArray([]types.Type{types.N, types.N, types.N}, nil)). - Description("the `hour`, `minute` (0-59), and `second` (0-59) representing the time of day for the nanoseconds since epoch in the supplied timezone (or UTC)"), - ), -} +var Clock = v1.Clock -var Weekday = &Builtin{ - Name: "time.weekday", - Description: "Returns the day of the week (Monday, Tuesday, ...) 
for the nanoseconds since epoch.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewAny( - types.N, - types.NewArray([]types.Type{types.N, types.S}, nil), - )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string"), - ), - types.Named("day", types.S).Description("the weekday represented by `ns` nanoseconds since the epoch in the supplied timezone (or UTC)"), - ), -} +var Weekday = v1.Weekday -var AddDate = &Builtin{ - Name: "time.add_date", - Description: "Returns the nanoseconds since epoch after adding years, months and days to nanoseconds. Month & day values outside their usual ranges after the operation and will be normalized - for example, October 32 would become November 1. `undefined` if the result would be outside the valid time range that can fit within an `int64`.", - Decl: types.NewFunction( - types.Args( - types.Named("ns", types.N).Description("nanoseconds since the epoch"), - types.Named("years", types.N), - types.Named("months", types.N), - types.Named("days", types.N), - ), - types.Named("output", types.N).Description("nanoseconds since the epoch representing the input time, with years, months and days added"), - ), -} +var AddDate = v1.AddDate -var Diff = &Builtin{ - Name: "time.diff", - Description: "Returns the difference between two unix timestamps in nanoseconds (with optional timezone strings).", - Decl: types.NewFunction( - types.Args( - types.Named("ns1", types.NewAny( - types.N, - types.NewArray([]types.Type{types.N, types.S}, nil), - )), - types.Named("ns2", types.NewAny( - types.N, - types.NewArray([]types.Type{types.N, types.S}, nil), - )), - ), - types.Named("output", types.NewArray([]types.Type{types.N, types.N, types.N, types.N, types.N, types.N}, nil)).Description("difference between `ns1` and `ns2` (in their supplied timezones, if supplied, or UTC) as array of numbers: `[years, months, days, hours, minutes, seconds]`"), - ), -} +var Diff = v1.Diff /** * Crypto. */ -var CryptoX509ParseCertificates = &Builtin{ - Name: "crypto.x509.parse_certificates", - Description: `Returns zero or more certificates from the given encoded string containing -DER certificate data. - -If the input is empty, the function will return null. The input string should be a list of one or more -concatenated PEM blocks. The whole input of concatenated PEM blocks can optionally be Base64 encoded.`, - Decl: types.NewFunction( - types.Args( - types.Named("certs", types.S).Description("base64 encoded DER or PEM data containing one or more certificates or a PEM string of one or more certificates"), - ), - types.Named("output", types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A)))).Description("parsed X.509 certificates represented as objects"), - ), -} +var CryptoX509ParseCertificates = v1.CryptoX509ParseCertificates -var CryptoX509ParseAndVerifyCertificates = &Builtin{ - Name: "crypto.x509.parse_and_verify_certificates", - Description: `Returns one or more certificates from the given string containing PEM -or base64 encoded DER certificates after verifying the supplied certificates form a complete -certificate chain back to a trusted root. 
- -The first certificate is treated as the root and the last is treated as the leaf, -with all others being treated as intermediates.`, - Decl: types.NewFunction( - types.Args( - types.Named("certs", types.S).Description("base64 encoded DER or PEM data containing two or more certificates where the first is a root CA, the last is a leaf certificate, and all others are intermediate CAs"), - ), - types.Named("output", types.NewArray([]types.Type{ - types.B, - types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), - }, nil)).Description("array of `[valid, certs]`: if the input certificate chain could be verified then `valid` is `true` and `certs` is an array of X.509 certificates represented as objects; if the input certificate chain could not be verified then `valid` is `false` and `certs` is `[]`"), - ), -} +var CryptoX509ParseAndVerifyCertificates = v1.CryptoX509ParseAndVerifyCertificates -var CryptoX509ParseAndVerifyCertificatesWithOptions = &Builtin{ - Name: "crypto.x509.parse_and_verify_certificates_with_options", - Description: `Returns one or more certificates from the given string containing PEM -or base64 encoded DER certificates after verifying the supplied certificates form a complete -certificate chain back to a trusted root. A config option passed as the second argument can -be used to configure the validation options used. - -The first certificate is treated as the root and the last is treated as the leaf, -with all others being treated as intermediates.`, - - Decl: types.NewFunction( - types.Args( - types.Named("certs", types.S).Description("base64 encoded DER or PEM data containing two or more certificates where the first is a root CA, the last is a leaf certificate, and all others are intermediate CAs"), - types.Named("options", types.NewObject( - nil, - types.NewDynamicProperty(types.S, types.A), - )).Description("object containing extra configs to verify the validity of certificates. `options` object supports four fields which map to the same fields in [x509.VerifyOptions struct](https://pkg.go.dev/crypto/x509#VerifyOptions). `DNSName`, `CurrentTime`: Nanoseconds since the Unix Epoch as a number, `MaxConstraintComparisons` and `KeyUsages`.
`KeyUsages` is a list and can have values such as: `\"KeyUsageAny\"`, `\"KeyUsageServerAuth\"`, `\"KeyUsageClientAuth\"`, `\"KeyUsageCodeSigning\"`, `\"KeyUsageEmailProtection\"`, `\"KeyUsageIPSECEndSystem\"`, `\"KeyUsageIPSECTunnel\"`, `\"KeyUsageIPSECUser\"`, `\"KeyUsageTimeStamping\"`, `\"KeyUsageOCSPSigning\"`, `\"KeyUsageMicrosoftServerGatedCrypto\"`, `\"KeyUsageNetscapeServerGatedCrypto\"`, `\"KeyUsageMicrosoftCommercialCodeSigning\"`, `\"KeyUsageMicrosoftKernelCodeSigning\"` "), - ), - types.Named("output", types.NewArray([]types.Type{ - types.B, - types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), - }, nil)).Description("array of `[valid, certs]`: if the input certificate chain could be verified then `valid` is `true` and `certs` is an array of X.509 certificates represented as objects; if the input certificate chain could not be verified then `valid` is `false` and `certs` is `[]`"), - ), -} +var CryptoX509ParseAndVerifyCertificatesWithOptions = v1.CryptoX509ParseAndVerifyCertificatesWithOptions -var CryptoX509ParseCertificateRequest = &Builtin{ - Name: "crypto.x509.parse_certificate_request", - Description: "Returns a PKCS #10 certificate signing request from the given PEM-encoded PKCS #10 certificate signing request.", - Decl: types.NewFunction( - types.Args( - types.Named("csr", types.S).Description("base64 string containing either a PEM encoded or DER CSR or a string containing a PEM CSR"), - ), - types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("X.509 CSR represented as an object"), - ), -} +var CryptoX509ParseCertificateRequest = v1.CryptoX509ParseCertificateRequest -var CryptoX509ParseKeyPair = &Builtin{ - Name: "crypto.x509.parse_keypair", - Description: "Returns a valid key pair.", - Decl: types.NewFunction( - types.Args( - types.Named("cert", types.S).Description("string containing PEM or base64 encoded DER certificates"), - types.Named("pem", types.S).Description("string containing PEM or base64 encoded DER keys"), - ), - types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("if the key pair is valid, returns the [tls.Certificate](https://pkg.go.dev/crypto/tls#Certificate) as an object. If the key pair is invalid, nil and an error are returned."), - ), -} -var CryptoX509ParseRSAPrivateKey = &Builtin{ - Name: "crypto.x509.parse_rsa_private_key", - Description: "Returns a JWK for signing a JWT from the given PEM-encoded RSA private key.", - Decl: types.NewFunction( - types.Args( - types.Named("pem", types.S).Description("base64 string containing a PEM encoded RSA private key"), - ), - types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JWK as an object"), - ), -} +var CryptoX509ParseKeyPair = v1.CryptoX509ParseKeyPair +var CryptoX509ParseRSAPrivateKey = v1.CryptoX509ParseRSAPrivateKey -var CryptoParsePrivateKeys = &Builtin{ - Name: "crypto.parse_private_keys", - Description: `Returns zero or more private keys from the given encoded string containing DER certificate data. - -If the input is empty, the function will return null. The input string should be a list of one or more concatenated PEM blocks. The whole input of concatenated PEM blocks can optionally be Base64 encoded.`, - Decl: types.NewFunction( - types.Args( - types.Named("keys", types.S).Description("PEM encoded data containing one or more private keys as concatenated blocks.
Optionally Base64 encoded."), - ), - types.Named("output", types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A)))).Description("parsed private keys represented as objects"), - ), -} +var CryptoParsePrivateKeys = v1.CryptoParsePrivateKeys -var CryptoMd5 = &Builtin{ - Name: "crypto.md5", - Description: "Returns a string representing the input string hashed with the MD5 function.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("MD5-hash of `x`"), - ), -} +var CryptoMd5 = v1.CryptoMd5 -var CryptoSha1 = &Builtin{ - Name: "crypto.sha1", - Description: "Returns a string representing the input string hashed with the SHA1 function.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("SHA1-hash of `x`"), - ), -} +var CryptoSha1 = v1.CryptoSha1 -var CryptoSha256 = &Builtin{ - Name: "crypto.sha256", - Description: "Returns a string representing the input string hashed with the SHA256 function.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("SHA256-hash of `x`"), - ), -} +var CryptoSha256 = v1.CryptoSha256 -var CryptoHmacMd5 = &Builtin{ - Name: "crypto.hmac.md5", - Description: "Returns a string representing the MD5 HMAC of the input message using the input key.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("input string"), - types.Named("key", types.S).Description("key to use"), - ), - types.Named("y", types.S).Description("MD5-HMAC of `x`"), - ), -} +var CryptoHmacMd5 = v1.CryptoHmacMd5 -var CryptoHmacSha1 = &Builtin{ - Name: "crypto.hmac.sha1", - Description: "Returns a string representing the SHA1 HMAC of the input message using the input key.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("input string"), - types.Named("key", types.S).Description("key to use"), - ), - types.Named("y", types.S).Description("SHA1-HMAC of `x`"), - ), -} +var CryptoHmacSha1 = v1.CryptoHmacSha1 -var CryptoHmacSha256 = &Builtin{ - Name: "crypto.hmac.sha256", - Description: "Returns a string representing the SHA256 HMAC of the input message using the input key.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("input string"), - types.Named("key", types.S).Description("key to use"), - ), - types.Named("y", types.S).Description("SHA256-HMAC of `x`"), - ), -} +var CryptoHmacSha256 = v1.CryptoHmacSha256 -var CryptoHmacSha512 = &Builtin{ - Name: "crypto.hmac.sha512", - Description: "Returns a string representing the SHA512 HMAC of the input message using the input key.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("input string"), - types.Named("key", types.S).Description("key to use"), - ), - types.Named("y", types.S).Description("SHA512-HMAC of `x`"), - ), -} +var CryptoHmacSha512 = v1.CryptoHmacSha512 -var CryptoHmacEqual = &Builtin{ - Name: "crypto.hmac.equal", - Description: "Returns a boolean representing the result of comparing two MACs for equality without leaking timing information.", - Decl: types.NewFunction( - types.Args( - types.Named("mac1", types.S).Description("mac1 to compare"), - types.Named("mac2", types.S).Description("mac2 to compare"), - ), - types.Named("result", types.B).Description("`true` if the MACs are equal, `false` otherwise"), - ), -} +var CryptoHmacEqual = v1.CryptoHmacEqual /** * Graphs.
*/ -var graphs = category("graph") - -var WalkBuiltin = &Builtin{ - Name: "walk", - Relation: true, - Description: "Generates `[path, value]` tuples for all nested documents of `x` (recursively). Queries can use `walk` to traverse documents nested under `x`.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("output", types.NewArray( - []types.Type{ - types.NewArray(nil, types.A), - types.A, - }, - nil, - )).Description("pairs of `path` and `value`: `path` is an array representing the pointer to `value` in `x`. If `path` is assigned a wildcard (`_`), the `walk` function will skip path creation entirely for faster evaluation."), - ), - Categories: graphs, -} -var ReachableBuiltin = &Builtin{ - Name: "graph.reachable", - Description: "Computes the set of reachable nodes in the graph from a set of starting nodes.", - Decl: types.NewFunction( - types.Args( - types.Named("graph", types.NewObject( - nil, - types.NewDynamicProperty( - types.A, - types.NewAny( - types.NewSet(types.A), - types.NewArray(nil, types.A)), - )), - ).Description("object containing a set or array of neighboring vertices"), - types.Named("initial", types.NewAny(types.NewSet(types.A), types.NewArray(nil, types.A))).Description("set or array of root vertices"), - ), - types.Named("output", types.NewSet(types.A)).Description("set of vertices reachable from the `initial` vertices in the directed `graph`"), - ), -} +var WalkBuiltin = v1.WalkBuiltin -var ReachablePathsBuiltin = &Builtin{ - Name: "graph.reachable_paths", - Description: "Computes the set of reachable paths in the graph from a set of starting nodes.", - Decl: types.NewFunction( - types.Args( - types.Named("graph", types.NewObject( - nil, - types.NewDynamicProperty( - types.A, - types.NewAny( - types.NewSet(types.A), - types.NewArray(nil, types.A)), - )), - ).Description("object containing a set or array of neighboring vertices"), - types.Named("initial", types.NewAny(types.NewSet(types.A), types.NewArray(nil, types.A))).Description("set or array of root vertices"),
- ), - types.Named("output", types.NewSet(types.NewArray(nil, types.A))).Description("paths reachable from the `initial` vertices in the directed `graph`"), - ), -} +var ReachableBuiltin = v1.ReachableBuiltin + +var ReachablePathsBuiltin = v1.ReachablePathsBuiltin /** * Type */ -var typesCat = category("types") - -var IsNumber = &Builtin{ - Name: "is_number", - Description: "Returns `true` if the input value is a number.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("result", types.B).Description("`true` if `x` is a number, `false` otherwise."), - ), - Categories: typesCat, -} -var IsString = &Builtin{ - Name: "is_string", - Description: "Returns `true` if the input value is a string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("result", types.B).Description("`true` if `x` is a string, `false` otherwise."), - ), - Categories: typesCat, -} +var IsNumber = v1.IsNumber -var IsBoolean = &Builtin{ - Name: "is_boolean", - Description: "Returns `true` if the input value is a boolean.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("result", types.B).Description("`true` if `x` is a boolean, `false` otherwise."), - ), - Categories: typesCat, -} +var IsString = v1.IsString -var IsArray = &Builtin{ - Name: "is_array", - Description: "Returns `true` if the input value is an array.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("result", types.B).Description("`true` if `x` is an array, `false` otherwise."), - ), - Categories: typesCat, -} +var IsBoolean = v1.IsBoolean -var IsSet = &Builtin{ - Name: "is_set", - Description: "Returns `true` if the input value is a set.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("result", types.B).Description("`true` if `x` is a set, `false` otherwise."), - ), - Categories: typesCat, -} +var IsArray = v1.IsArray -var IsObject = &Builtin{ - Name: "is_object", - Description: "Returns `true` if the input value is an object.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("result", types.B).Description("`true` if `x` is an object, `false` otherwise."), - ), - Categories: typesCat, -} +var IsSet = v1.IsSet -var IsNull = &Builtin{ - Name: "is_null", - Description: "Returns `true` if the input value is null.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("result", types.B).Description("`true` if `x` is null, `false` otherwise."), - ), - Categories: typesCat, -} +var IsObject = v1.IsObject + +var IsNull = v1.IsNull /** * Type Name */ // TypeNameBuiltin returns the type of the input. -var TypeNameBuiltin = &Builtin{ - Name: "type_name", - Description: "Returns the type of its input value.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("type", types.S).Description(`one of "null", "boolean", "number", "string", "array", "object", "set"`), - ), - Categories: typesCat, -} +var TypeNameBuiltin = v1.TypeNameBuiltin /** * HTTP Request */ // Marked non-deterministic because HTTP request results can be non-deterministic.
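For orientation, a minimal sketch of exercising `http.send` from Go, assuming the `github.com/open-policy-agent/opa/v1/rego` package this bump vendors; the URL is illustrative, and the call performs real network I/O, which is why the builtin is flagged non-deterministic:

package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/v1/rego"
)

func main() {
	// "raise_error": false folds transport errors into the response object
	// instead of aborting evaluation.
	r := rego.New(rego.Query(
		`resp := http.send({"method": "get", "url": "https://example.com", "raise_error": false})`,
	))
	rs, err := r.Eval(context.Background())
	if err != nil {
		panic(err)
	}
	if len(rs) > 0 {
		resp := rs[0].Bindings["resp"].(map[string]interface{})
		fmt.Println(resp["status_code"]) // e.g. 200
	}
}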
-var HTTPSend = &Builtin{ - Name: "http.send", - Description: "Returns an HTTP response to the given HTTP request.", - Decl: types.NewFunction( - types.Args( - types.Named("request", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), - ), - types.Named("response", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))), - ), - Nondeterministic: true, -} +var HTTPSend = v1.HTTPSend /** * GraphQL */ // GraphQLParse returns a pair of AST objects from parsing/validation. -var GraphQLParse = &Builtin{ - Name: "graphql.parse", - Description: "Returns AST objects for a given GraphQL query and schema after validating the query against the schema. Returns undefined if errors were encountered during parsing or validation. The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.", - Decl: types.NewFunction( - types.Args( - types.Named("query", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))), - types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))), - ), - types.Named("output", types.NewArray([]types.Type{ - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - }, nil)).Description("`output` is of the form `[query_ast, schema_ast]`. If the GraphQL query is valid given the provided schema, then `query_ast` and `schema_ast` are objects describing the ASTs for the query and schema."), - ), -} +var GraphQLParse = v1.GraphQLParse // GraphQLParseAndVerify returns a boolean and a pair of AST objects from parsing/validation. -var GraphQLParseAndVerify = &Builtin{ - Name: "graphql.parse_and_verify", - Description: "Returns a boolean indicating success or failure alongside the parsed ASTs for a given GraphQL query and schema after validating the query against the schema. The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.", - Decl: types.NewFunction( - types.Args( - types.Named("query", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))), - types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))), - ), - types.Named("output", types.NewArray([]types.Type{ - types.B, - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - }, nil)).Description("`output` is of the form `[valid, query_ast, schema_ast]`. If the query is valid given the provided schema, then `valid` is `true`, and `query_ast` and `schema_ast` are objects describing the ASTs for the GraphQL query and schema. Otherwise, `valid` is `false` and `query_ast` and `schema_ast` are `{}`."), - ), -} +var GraphQLParseAndVerify = v1.GraphQLParseAndVerify // GraphQLParseQuery parses the input GraphQL query and returns a JSON // representation of its AST. -var GraphQLParseQuery = &Builtin{ - Name: "graphql.parse_query", - Description: "Returns an AST object for a GraphQL query.", - Decl: types.NewFunction( - types.Args( - types.Named("query", types.S), - ), - types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("AST object for the GraphQL query."), - ), -} +var GraphQLParseQuery = v1.GraphQLParseQuery // GraphQLParseSchema parses the input GraphQL schema and returns a JSON // representation of its AST.
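A hedged sketch of `graphql.parse_schema` evaluated from Go (v1 `rego` package assumed; the schema string is invented for illustration):

package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/v1/rego"
)

func main() {
	// Parse a trivial schema into its AST object representation.
	r := rego.New(rego.Query(
		`out := graphql.parse_schema("type Query { hello: String }")`,
	))
	rs, err := r.Eval(context.Background())
	if err != nil {
		panic(err)
	}
	if len(rs) > 0 {
		fmt.Println(rs[0].Bindings["out"]) // AST object as a map
	}
}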
-var GraphQLParseSchema = &Builtin{ - Name: "graphql.parse_schema", - Description: "Returns an AST object for a GraphQL schema.", - Decl: types.NewFunction( - types.Args( - types.Named("schema", types.S), - ), - types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("AST object for the GraphQL schema."), - ), -} +var GraphQLParseSchema = v1.GraphQLParseSchema // GraphQLIsValid returns true if a GraphQL query is valid with a given // schema, and returns false for all other inputs. -var GraphQLIsValid = &Builtin{ - Name: "graphql.is_valid", - Description: "Checks that a GraphQL query is valid against a given schema. The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.", - Decl: types.NewFunction( - types.Args( - types.Named("query", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))), - types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))), - ), - types.Named("output", types.B).Description("`true` if the query is valid under the given schema. `false` otherwise."), - ), -} +var GraphQLIsValid = v1.GraphQLIsValid // GraphQLSchemaIsValid returns true if the input is a valid GraphQL schema, // and returns false for all other inputs. -var GraphQLSchemaIsValid = &Builtin{ - Name: "graphql.schema_is_valid", - Description: "Checks that the input is a valid GraphQL schema. The schema can be either a GraphQL string or an AST object from the other GraphQL builtin functions.", - Decl: types.NewFunction( - types.Args( - types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))), - ), - types.Named("output", types.B).Description("`true` if the schema is a valid GraphQL schema. `false` otherwise."), - ), -} +var GraphQLSchemaIsValid = v1.GraphQLSchemaIsValid /** * JSON Schema @@ -2811,313 +499,76 @@ var GraphQLSchemaIsValid = &Builtin{ // JSONSchemaVerify returns empty string if the input is valid JSON schema // and returns error string for all other inputs. -var JSONSchemaVerify = &Builtin{ - Name: "json.verify_schema", - Description: "Checks that the input is a valid JSON schema object. The schema can be either a JSON string or a JSON object.", - Decl: types.NewFunction( - types.Args( - types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). - Description("the schema to verify"), - ), - types.Named("output", types.NewArray([]types.Type{ - types.B, - types.NewAny(types.S, types.Null{}), - }, nil)). - Description("`output` is of the form `[valid, error]`. If the schema is valid, then `valid` is `true`, and `error` is `null`. Otherwise, `valid` is `false` and `error` is a string describing the error."), - ), - Categories: objectCat, -} +var JSONSchemaVerify = v1.JSONSchemaVerify // JSONMatchSchema returns empty array if the document matches the JSON schema, // and returns non-empty array with error objects otherwise. -var JSONMatchSchema = &Builtin{ - Name: "json.match_schema", - Description: "Checks that the document matches the JSON schema.", - Decl: types.NewFunction( - types.Args( - types.Named("document", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). - Description("document to verify by schema"), - types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))).
- Description("schema to verify document by"), - ), - types.Named("output", types.NewArray([]types.Type{ - types.B, - types.NewArray( - nil, types.NewObject( - []*types.StaticProperty{ - {Key: "error", Value: types.S}, - {Key: "type", Value: types.S}, - {Key: "field", Value: types.S}, - {Key: "desc", Value: types.S}, - }, - nil, - ), - ), - }, nil)). - Description("`output` is of the form `[match, errors]`. If the document is valid given the schema, then `match` is `true`, and `errors` is an empty array. Otherwise, `match` is `false` and `errors` is an array of objects describing the error(s)."), - ), - Categories: objectCat, -} +var JSONMatchSchema = v1.JSONMatchSchema /** * Cloud Provider Helper Functions */ -var providersAWSCat = category("providers.aws") - -var ProvidersAWSSignReqObj = &Builtin{ - Name: "providers.aws.sign_req", - Description: "Signs an HTTP request object for Amazon Web Services. Currently implements [AWS Signature Version 4 request signing](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) by the `Authorization` header method.", - Decl: types.NewFunction( - types.Args( - types.Named("request", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), - types.Named("aws_config", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), - types.Named("time_ns", types.N), - ), - types.Named("signed_request", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))), - ), - Categories: providersAWSCat, -} + +var ProvidersAWSSignReqObj = v1.ProvidersAWSSignReqObj /** * Rego */ -var RegoParseModule = &Builtin{ - Name: "rego.parse_module", - Description: "Parses the input Rego string and returns an object representation of the AST.", - Decl: types.NewFunction( - types.Args( - types.Named("filename", types.S).Description("file name to attach to AST nodes' locations"), - types.Named("rego", types.S).Description("Rego module"), - ), - types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), // TODO(tsandall): import AST schema - ), -} +var RegoParseModule = v1.RegoParseModule -var RegoMetadataChain = &Builtin{ - Name: "rego.metadata.chain", - Description: `Returns the chain of metadata for the active rule. -Ordered starting at the active rule, going outward to the most distant node in its package ancestry. -A chain entry is a JSON document with two members: "path", an array representing the path of the node; and "annotations", a JSON document containing the annotations declared for the node. -The first entry in the chain always points to the active rule, even if it has no declared annotations (in which case the "annotations" member is not present).`, - Decl: types.NewFunction( - types.Args(), - types.Named("chain", types.NewArray(nil, types.A)).Description("each array entry represents a node in the path ancestry (chain) of the active rule that also has declared annotations"), - ), -} +var RegoMetadataChain = v1.RegoMetadataChain // RegoMetadataRule returns the metadata for the active rule -var RegoMetadataRule = &Builtin{ - Name: "rego.metadata.rule", - Description: "Returns annotations declared for the active rule and using the _rule_ scope.", - Decl: types.NewFunction( - types.Args(), - types.Named("output", types.A).Description("\"rule\" scope annotations for this rule; empty object if no annotations exist"), - ), -} +var RegoMetadataRule = v1.RegoMetadataRule /** * OPA */ // Marked non-deterministic because of unpredictable config/environment-dependent results. 
-var OPARuntime = &Builtin{ - Name: "opa.runtime", - Description: "Returns an object that describes the runtime environment where OPA is deployed.", - Decl: types.NewFunction( - nil, - types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))). - Description("includes a `config` key if OPA was started with a configuration file; an `env` key containing the environment variables that the OPA process was started with; includes `version` and `commit` keys containing the version and build commit of OPA."), - ), - Nondeterministic: true, -} +var OPARuntime = v1.OPARuntime /** * Trace */ -var tracing = category("tracing") - -var Trace = &Builtin{ - Name: "trace", - Description: "Emits `note` as a `Note` event in the query explanation. Query explanations show the exact expressions evaluated by OPA during policy execution. For example, `trace(\"Hello There!\")` includes `Note \"Hello There!\"` in the query explanation. To include variables in the message, use `sprintf`. For example, `person := \"Bob\"; trace(sprintf(\"Hello There! %v\", [person]))` will emit `Note \"Hello There! Bob\"` inside of the explanation.", - Decl: types.NewFunction( - types.Args( - types.Named("note", types.S).Description("the note to include"), - ), - types.Named("result", types.B).Description("always `true`"), - ), - Categories: tracing, -} + +var Trace = v1.Trace /** * Glob */ -var GlobMatch = &Builtin{ - Name: "glob.match", - Description: "Parses and matches strings against the glob notation. Not to be confused with `regex.globs_match`.", - Decl: types.NewFunction( - types.Args( - types.Named("pattern", types.S), - types.Named("delimiters", types.NewAny( - types.NewArray(nil, types.S), - types.NewNull(), - )).Description("glob pattern delimiters, e.g. `[\".\", \":\"]`, defaults to `[\".\"]` if unset. If `delimiters` is `null`, glob match without delimiter."), - types.Named("match", types.S), - ), - types.Named("result", types.B).Description("true if `match` can be found in `pattern` which is separated by `delimiters`"), - ), -} +var GlobMatch = v1.GlobMatch -var GlobQuoteMeta = &Builtin{ - Name: "glob.quote_meta", - Description: "Returns a string which represents a version of the pattern where all asterisks have been escaped.", - Decl: types.NewFunction( - types.Args( - types.Named("pattern", types.S), - ), - types.Named("output", types.S).Description("the escaped string of `pattern`"), - ), - // TODO(sr): example for this was: Calling ``glob.quote_meta("*.github.com", output)`` returns ``\\*.github.com`` as ``output``. -} +var GlobQuoteMeta = v1.GlobQuoteMeta /** * Networking */ -var NetCIDRIntersects = &Builtin{ - Name: "net.cidr_intersects", - Description: "Checks if a CIDR intersects with another CIDR (e.g. `192.168.0.0/16` overlaps with `192.168.1.0/24`). 
Supports both IPv4 and IPv6 notations.", - Decl: types.NewFunction( - types.Args( - types.Named("cidr1", types.S), - types.Named("cidr2", types.S), - ), - types.Named("result", types.B), - ), -} +var NetCIDRIntersects = v1.NetCIDRIntersects -var NetCIDRExpand = &Builtin{ - Name: "net.cidr_expand", - Description: "Expands CIDR to set of hosts (e.g., `net.cidr_expand(\"192.168.0.0/30\")` generates 4 hosts: `{\"192.168.0.0\", \"192.168.0.1\", \"192.168.0.2\", \"192.168.0.3\"}`).", - Decl: types.NewFunction( - types.Args( - types.Named("cidr", types.S), - ), - types.Named("hosts", types.NewSet(types.S)).Description("set of IP addresses the CIDR `cidr` expands to"), - ), -} +var NetCIDRExpand = v1.NetCIDRExpand -var NetCIDRContains = &Builtin{ - Name: "net.cidr_contains", - Description: "Checks if a CIDR or IP is contained within another CIDR. `output` is `true` if `cidr_or_ip` (e.g. `127.0.0.64/26` or `127.0.0.1`) is contained within `cidr` (e.g. `127.0.0.1/24`) and `false` otherwise. Supports both IPv4 and IPv6 notations.", - Decl: types.NewFunction( - types.Args( - types.Named("cidr", types.S), - types.Named("cidr_or_ip", types.S), - ), - types.Named("result", types.B), - ), -} +var NetCIDRContains = v1.NetCIDRContains -var NetCIDRContainsMatches = &Builtin{ - Name: "net.cidr_contains_matches", - Description: "Checks if collections of CIDRs or IPs are contained within another collection of CIDRs and returns matches. " + - "This function is similar to `net.cidr_contains` except it allows callers to pass collections of CIDRs or IPs as arguments and returns the matches (as opposed to a boolean result indicating a match between two CIDRs/IPs).", - Decl: types.NewFunction( - types.Args( - types.Named("cidrs", netCidrContainsMatchesOperandType), - types.Named("cidrs_or_ips", netCidrContainsMatchesOperandType), - ), - types.Named("output", types.NewSet(types.NewArray([]types.Type{types.A, types.A}, nil))).Description("tuples identifying matches where `cidrs_or_ips` are contained within `cidrs`"), - ), -} +var NetCIDRContainsMatches = v1.NetCIDRContainsMatches -var NetCIDRMerge = &Builtin{ - Name: "net.cidr_merge", - Description: "Merges IP addresses and subnets into the smallest possible list of CIDRs (e.g., `net.cidr_merge([\"192.0.128.0/24\", \"192.0.129.0/24\"])` generates `{\"192.0.128.0/23\"}`. " + - `This function merges adjacent subnets where possible, those contained within others and also removes any duplicates. -Supports both IPv4 and IPv6 notations. IPv6 inputs need a prefix length (e.g.
"/128").`, - Decl: types.NewFunction( - types.Args( - types.Named("addrs", types.NewAny( - types.NewArray(nil, types.NewAny(types.S)), - types.NewSet(types.S), - )).Description("CIDRs or IP addresses"), - ), - types.Named("output", types.NewSet(types.S)).Description("smallest possible set of CIDRs obtained after merging the provided list of IP addresses and subnets in `addrs`"), - ), -} +var NetCIDRMerge = v1.NetCIDRMerge -var NetCIDRIsValid = &Builtin{ - Name: "net.cidr_is_valid", - Description: "Parses an IPv4/IPv6 CIDR and returns a boolean indicating if the provided CIDR is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("cidr", types.S), - ), - types.Named("result", types.B), - ), -} - -var netCidrContainsMatchesOperandType = types.NewAny( - types.S, - types.NewArray(nil, types.NewAny( - types.S, - types.NewArray(nil, types.A), - )), - types.NewSet(types.NewAny( - types.S, - types.NewArray(nil, types.A), - )), - types.NewObject(nil, types.NewDynamicProperty( - types.S, - types.NewAny( - types.S, - types.NewArray(nil, types.A), - ), - )), -) +var NetCIDRIsValid = v1.NetCIDRIsValid // Marked non-deterministic because DNS resolution results can be non-deterministic. -var NetLookupIPAddr = &Builtin{ - Name: "net.lookup_ip_addr", - Description: "Returns the set of IP addresses (both v4 and v6) that the passed-in `name` resolves to using the standard name resolution mechanisms available.", - Decl: types.NewFunction( - types.Args( - types.Named("name", types.S).Description("domain name to resolve"), - ), - types.Named("addrs", types.NewSet(types.S)).Description("IP addresses (v4 and v6) that `name` resolves to"), - ), - Nondeterministic: true, -} +var NetLookupIPAddr = v1.NetLookupIPAddr /** * Semantic Versions */ -var SemVerIsValid = &Builtin{ - Name: "semver.is_valid", - Description: "Validates that the input is a valid SemVer string.", - Decl: types.NewFunction( - types.Args( - types.Named("vsn", types.A), - ), - types.Named("result", types.B).Description("`true` if `vsn` is a valid SemVer; `false` otherwise"), - ), -} +var SemVerIsValid = v1.SemVerIsValid -var SemVerCompare = &Builtin{ - Name: "semver.compare", - Description: "Compares valid SemVer formatted version strings.", - Decl: types.NewFunction( - types.Args( - types.Named("a", types.S), - types.Named("b", types.S), - ), - types.Named("result", types.N).Description("`-1` if `a < b`; `1` if `a > b`; `0` if `a == b`"), - ), -} +var SemVerCompare = v1.SemVerCompare /** * Printing @@ -3128,248 +579,56 @@ var SemVerCompare = &Builtin{ // operands may be of any type. Furthermore, unlike other built-in functions, // undefined operands DO NOT cause the print() function to fail during // evaluation. -var Print = &Builtin{ - Name: "print", - Decl: types.NewVariadicFunction(nil, types.A, nil), -} +var Print = v1.Print // InternalPrint represents the internal implementation of the print() function. // The compiler rewrites print() calls to refer to the internal implementation. -var InternalPrint = &Builtin{ - Name: "internal.print", - Decl: types.NewFunction([]types.Type{types.NewArray(nil, types.NewSet(types.A))}, nil), -} +var InternalPrint = v1.InternalPrint /** * Deprecated built-ins. */ // SetDiff has been replaced by the minus built-in. 
-var SetDiff = &Builtin{ - Name: "set_diff", - Decl: types.NewFunction( - types.Args( - types.NewSet(types.A), - types.NewSet(types.A), - ), - types.NewSet(types.A), - ), - deprecated: true, -} +var SetDiff = v1.SetDiff // NetCIDROverlap has been replaced by the `net.cidr_contains` built-in. -var NetCIDROverlap = &Builtin{ - Name: "net.cidr_overlap", - Decl: types.NewFunction( - types.Args( - types.S, - types.S, - ), - types.B, - ), - deprecated: true, -} +var NetCIDROverlap = v1.NetCIDROverlap // CastArray checks the underlying type of the input. If it is array or set, an array // containing the values is returned. If it is not an array, an error is thrown. -var CastArray = &Builtin{ - Name: "cast_array", - Decl: types.NewFunction( - types.Args(types.A), - types.NewArray(nil, types.A), - ), - deprecated: true, -} +var CastArray = v1.CastArray // CastSet checks the underlying type of the input. // If it is a set, the set is returned. // If it is an array, the array is returned in set form (all duplicates removed) // If neither, an error is thrown -var CastSet = &Builtin{ - Name: "cast_set", - Decl: types.NewFunction( - types.Args(types.A), - types.NewSet(types.A), - ), - deprecated: true, -} +var CastSet = v1.CastSet // CastString returns input if it is a string; if not returns error. // For formatting variables, see sprintf -var CastString = &Builtin{ - Name: "cast_string", - Decl: types.NewFunction( - types.Args(types.A), - types.S, - ), - deprecated: true, -} +var CastString = v1.CastString // CastBoolean returns input if it is a boolean; if not returns error. -var CastBoolean = &Builtin{ - Name: "cast_boolean", - Decl: types.NewFunction( - types.Args(types.A), - types.B, - ), - deprecated: true, -} +var CastBoolean = v1.CastBoolean // CastNull returns null if input is null; if not returns error. -var CastNull = &Builtin{ - Name: "cast_null", - Decl: types.NewFunction( - types.Args(types.A), - types.NewNull(), - ), - deprecated: true, -} +var CastNull = v1.CastNull // CastObject returns the given object if it is an object; throws an error otherwise -var CastObject = &Builtin{ - Name: "cast_object", - Decl: types.NewFunction( - types.Args(types.A), - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - ), - deprecated: true, -} +var CastObject = v1.CastObject // RegexMatchDeprecated declares `re_match` which has been deprecated. Use `regex.match` instead. -var RegexMatchDeprecated = &Builtin{ - Name: "re_match", - Decl: types.NewFunction( - types.Args( - types.S, - types.S, - ), - types.B, - ), - deprecated: true, -} +var RegexMatchDeprecated = v1.RegexMatchDeprecated // All takes a collection and returns true if all of the items // are true. A collection of length 0 returns true. -var All = &Builtin{ - Name: "all", - Decl: types.NewFunction( - types.Args( - types.NewAny( - types.NewSet(types.A), - types.NewArray(nil, types.A), - ), - ), - types.B, - ), - deprecated: true, -} +var All = v1.All // Any takes a collection and returns true if any of the items // is true. A collection of length 0 returns false. -var Any = &Builtin{ - Name: "any", - Decl: types.NewFunction( - types.Args( - types.NewAny( - types.NewSet(types.A), - types.NewArray(nil, types.A), - ), - ), - types.B, - ), - deprecated: true, -} +var Any = v1.Any // Builtin represents a built-in function supported by OPA. Every built-in // function is uniquely identified by a name.
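The `Builtin` machinery below is what backs custom built-in registration; a hedged sketch using the documented `rego.Function1` helper (v1 packages assumed; the `hello` builtin is invented for illustration):

package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
	"github.com/open-policy-agent/opa/v1/rego"
	"github.com/open-policy-agent/opa/v1/types"
)

func main() {
	// Declare a one-argument string -> string builtin and its implementation.
	hello := rego.Function1(
		&rego.Function{Name: "hello", Decl: types.NewFunction(types.Args(types.S), types.S)},
		func(_ rego.BuiltinContext, a *ast.Term) (*ast.Term, error) {
			if s, ok := a.Value.(ast.String); ok {
				return ast.StringTerm("hello, " + string(s)), nil
			}
			return nil, nil // undefined for non-string inputs
		},
	)
	r := rego.New(rego.Query(`x := hello("world")`), hello)
	rs, err := r.Eval(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(rs[0].Bindings["x"]) // hello, world
}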
-type Builtin struct { - Name string `json:"name"` // Unique name of built-in function, e.g., <name>(arg1,arg2,...,argN) - Description string `json:"description,omitempty"` // Description of what the built-in function does. - - // Categories of the built-in function. Omitted for namespaced - // built-ins, i.e. "array.concat" is taken to be of the "array" category. - // "minus" for example, is part of two categories: numbers and sets. (NOTE(sr): aspirational) - Categories []string `json:"categories,omitempty"` - - Decl *types.Function `json:"decl"` // Built-in function type declaration. - Infix string `json:"infix,omitempty"` // Unique name of infix operator. Default should be unset. - Relation bool `json:"relation,omitempty"` // Indicates if the built-in acts as a relation. - deprecated bool // Indicates if the built-in has been deprecated. - Nondeterministic bool `json:"nondeterministic,omitempty"` // Indicates if the built-in returns non-deterministic results. -} - -// category is a helper for specifying a Builtin's Categories -func category(cs ...string) []string { - return cs -} - -// Minimal returns a shallow copy of b with the descriptions and categories and -// named arguments stripped out. -func (b *Builtin) Minimal() *Builtin { - cpy := *b - fargs := b.Decl.FuncArgs() - if fargs.Variadic != nil { - cpy.Decl = types.NewVariadicFunction(fargs.Args, fargs.Variadic, b.Decl.Result()) - } else { - cpy.Decl = types.NewFunction(fargs.Args, b.Decl.Result()) - } - cpy.Categories = nil - cpy.Description = "" - return &cpy -} - -// IsDeprecated returns true if the Builtin function is deprecated and will be removed in a future release. -func (b *Builtin) IsDeprecated() bool { - return b.deprecated -} - -// IsNondeterministic returns true if the Builtin function returns non-deterministic results. -func (b *Builtin) IsNondeterministic() bool { - return b.Nondeterministic -} - -// Expr creates a new expression for the built-in with the given operands. -func (b *Builtin) Expr(operands ...*Term) *Expr { - ts := make([]*Term, len(operands)+1) - ts[0] = NewTerm(b.Ref()) - for i := range operands { - ts[i+1] = operands[i] - } - return &Expr{ - Terms: ts, - } -} - -// Call creates a new term for the built-in with the given operands. -func (b *Builtin) Call(operands ...*Term) *Term { - call := make(Call, len(operands)+1) - call[0] = NewTerm(b.Ref()) - for i := range operands { - call[i+1] = operands[i] - } - return NewTerm(call) -} - -// Ref returns a Ref that refers to the built-in function. -func (b *Builtin) Ref() Ref { - parts := strings.Split(b.Name, ".") - ref := make(Ref, len(parts)) - ref[0] = VarTerm(parts[0]) - for i := 1; i < len(parts); i++ { - ref[i] = StringTerm(parts[i]) - } - return ref -} - -// IsTargetPos returns true if a variable in the i-th position will be bound by -// evaluating the call expression.
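A short sketch of the `Ref`, `Expr`, and `Call` helpers defined above, which build AST nodes for a built-in programmatically (v1 `ast` package assumed; printed forms are indicative only):

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	// Ref() splits the builtin name on "." into a reference.
	fmt.Println(ast.Equality.Ref()) // eq

	// Expr() wraps operands into an expression; eq has an infix form.
	expr := ast.Equality.Expr(ast.VarTerm("x"), ast.IntNumberTerm(1))
	fmt.Println(expr) // e.g. x = 1

	// Call() produces a call term usable inside other expressions.
	call := ast.Plus.Call(ast.IntNumberTerm(1), ast.IntNumberTerm(2))
	fmt.Println(call) // e.g. plus(1, 2)
}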
-func (b *Builtin) IsTargetPos(i int) bool { - return len(b.Decl.FuncArgs().Args) == i -} - -func init() { - BuiltinMap = map[string]*Builtin{} - for _, b := range DefaultBuiltins { - RegisterBuiltin(b) - } -} +type Builtin = v1.Builtin diff --git a/vendor/github.com/open-policy-agent/opa/ast/capabilities.go b/vendor/github.com/open-policy-agent/opa/ast/capabilities.go index 3b95d79e572..bc7278a885a 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/capabilities.go +++ b/vendor/github.com/open-policy-agent/opa/ast/capabilities.go @@ -5,228 +5,54 @@ package ast import ( - "bytes" - _ "embed" - "encoding/json" - "fmt" "io" - "os" - "sort" - "strings" - caps "github.com/open-policy-agent/opa/capabilities" - "github.com/open-policy-agent/opa/internal/semver" - "github.com/open-policy-agent/opa/internal/wasm/sdk/opa/capabilities" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // VersionIndex contains an index from built-in function name, language feature, // and future rego keyword to version number. During the build, this is used to // create an index of the minimum version required for the built-in/feature/kw. -type VersionIndex struct { - Builtins map[string]semver.Version `json:"builtins"` - Features map[string]semver.Version `json:"features"` - Keywords map[string]semver.Version `json:"keywords"` -} - -// NOTE(tsandall): this file is generated by internal/cmd/genversionindex/main.go -// and run as part of go:generate. We generate the version index as part of the -// build process because it's relatively expensive to build (it takes ~500ms on -// my machine) and never changes. -// -//go:embed version_index.json -var versionIndexBs []byte - -var minVersionIndex = func() VersionIndex { - var vi VersionIndex - err := json.Unmarshal(versionIndexBs, &vi) - if err != nil { - panic(err) - } - return vi -}() +type VersionIndex = v1.VersionIndex // In the compiler, we used this to check that we're OK working with ref heads. // If this isn't present, we'll fail. This is to ensure that older versions of // OPA can work with policies that we're compiling -- if they don't know ref // heads, they wouldn't be able to parse them. -const FeatureRefHeadStringPrefixes = "rule_head_ref_string_prefixes" -const FeatureRefHeads = "rule_head_refs" -const FeatureRegoV1Import = "rego_v1_import" +const FeatureRefHeadStringPrefixes = v1.FeatureRefHeadStringPrefixes +const FeatureRefHeads = v1.FeatureRefHeads +const FeatureRegoV1 = v1.FeatureRegoV1 +const FeatureRegoV1Import = v1.FeatureRegoV1Import // Capabilities defines a structure containing data that describes the capabilities // or features supported by a particular version of OPA. -type Capabilities struct { - Builtins []*Builtin `json:"builtins,omitempty"` - FutureKeywords []string `json:"future_keywords,omitempty"` - WasmABIVersions []WasmABIVersion `json:"wasm_abi_versions,omitempty"` - - // Features is a bit of a mixed bag for checking that an older version of OPA - // is able to do what needs to be done. - // TODO(sr): find better words ^^ - Features []string `json:"features,omitempty"` - - // allow_net is an array of hostnames or IP addresses that an OPA instance is - // allowed to connect to. - // If omitted, ANY host can be connected to. If empty, NO host can be connected to. - // As of now, this only controls fetching remote refs for using JSON Schemas in - // the type checker.
- // TODO(sr): support ports to further restrict connection peers - // TODO(sr): support restricting `http.send` using the same mechanism (see https://github.com/open-policy-agent/opa/issues/3665) - AllowNet []string `json:"allow_net,omitempty"` -} +type Capabilities = v1.Capabilities // WasmABIVersion captures the Wasm ABI version. Its `Minor` version is indicating // backwards-compatible changes. -type WasmABIVersion struct { - Version int `json:"version"` - Minor int `json:"minor_version"` -} +type WasmABIVersion = v1.WasmABIVersion // CapabilitiesForThisVersion returns the capabilities of this version of OPA. func CapabilitiesForThisVersion() *Capabilities { - f := &Capabilities{} - - for _, vers := range capabilities.ABIVersions() { - f.WasmABIVersions = append(f.WasmABIVersions, WasmABIVersion{Version: vers[0], Minor: vers[1]}) - } - - f.Builtins = make([]*Builtin, len(Builtins)) - copy(f.Builtins, Builtins) - sort.Slice(f.Builtins, func(i, j int) bool { - return f.Builtins[i].Name < f.Builtins[j].Name - }) - - for kw := range futureKeywords { - f.FutureKeywords = append(f.FutureKeywords, kw) - } - sort.Strings(f.FutureKeywords) - - f.Features = []string{ - FeatureRefHeadStringPrefixes, - FeatureRefHeads, - FeatureRegoV1Import, - } - - return f + return v1.CapabilitiesForThisVersion(v1.CapabilitiesRegoVersion(DefaultRegoVersion)) } // LoadCapabilitiesJSON loads a JSON serialized capabilities structure from the reader r. func LoadCapabilitiesJSON(r io.Reader) (*Capabilities, error) { - d := util.NewJSONDecoder(r) - var c Capabilities - return &c, d.Decode(&c) + return v1.LoadCapabilitiesJSON(r) } // LoadCapabilitiesVersion loads a JSON serialized capabilities structure from the specific version. func LoadCapabilitiesVersion(version string) (*Capabilities, error) { - cvs, err := LoadCapabilitiesVersions() - if err != nil { - return nil, err - } - - for _, cv := range cvs { - if cv == version { - cont, err := caps.FS.ReadFile(cv + ".json") - if err != nil { - return nil, err - } - - return LoadCapabilitiesJSON(bytes.NewReader(cont)) - } - - } - return nil, fmt.Errorf("no capabilities version found %v", version) + return v1.LoadCapabilitiesVersion(version) } // LoadCapabilitiesFile loads a JSON serialized capabilities structure from a file. func LoadCapabilitiesFile(file string) (*Capabilities, error) { - fd, err := os.Open(file) - if err != nil { - return nil, err - } - defer fd.Close() - return LoadCapabilitiesJSON(fd) + return v1.LoadCapabilitiesFile(file) } // LoadCapabilitiesVersions loads all capabilities versions func LoadCapabilitiesVersions() ([]string, error) { - ents, err := caps.FS.ReadDir(".") - if err != nil { - return nil, err - } - - capabilitiesVersions := make([]string, 0, len(ents)) - for _, ent := range ents { - capabilitiesVersions = append(capabilitiesVersions, strings.Replace(ent.Name(), ".json", "", 1)) - } - return capabilitiesVersions, nil -} - -// MinimumCompatibleVersion returns the minimum compatible OPA version based on -// the built-ins, features, and keywords in c. 
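A minimal sketch of the capabilities API aliased here: load the pinned capabilities for a release and compute the oldest OPA version able to evaluate policies that depend on them (v1 `ast` package assumed):

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	// Version strings match the vendored capabilities files, e.g. "v1.4.2".
	caps, err := ast.LoadCapabilitiesVersion("v1.4.2")
	if err != nil {
		panic(err)
	}
	if v, ok := caps.MinimumCompatibleVersion(); ok {
		fmt.Println("minimum compatible OPA version:", v)
	}
}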
-func (c *Capabilities) MinimumCompatibleVersion() (string, bool) { - - var maxVersion semver.Version - - // this is the oldest OPA release that includes capabilities - if err := maxVersion.Set("0.17.0"); err != nil { - panic("unreachable") - } - - for _, bi := range c.Builtins { - v, ok := minVersionIndex.Builtins[bi.Name] - if !ok { - return "", false - } - if v.Compare(maxVersion) > 0 { - maxVersion = v - } - } - - for _, kw := range c.FutureKeywords { - v, ok := minVersionIndex.Keywords[kw] - if !ok { - return "", false - } - if v.Compare(maxVersion) > 0 { - maxVersion = v - } - } - - for _, feat := range c.Features { - v, ok := minVersionIndex.Features[feat] - if !ok { - return "", false - } - if v.Compare(maxVersion) > 0 { - maxVersion = v - } - } - - return maxVersion.String(), true -} - -func (c *Capabilities) ContainsFeature(feature string) bool { - for _, f := range c.Features { - if f == feature { - return true - } - } - return false -} - -// addBuiltinSorted inserts a built-in into c in sorted order. An existing built-in with the same name -// will be overwritten. -func (c *Capabilities) addBuiltinSorted(bi *Builtin) { - i := sort.Search(len(c.Builtins), func(x int) bool { - return c.Builtins[x].Name >= bi.Name - }) - if i < len(c.Builtins) && bi.Name == c.Builtins[i].Name { - c.Builtins[i] = bi - return - } - c.Builtins = append(c.Builtins, nil) - copy(c.Builtins[i+1:], c.Builtins[i:]) - c.Builtins[i] = bi + return v1.LoadCapabilitiesVersions() } diff --git a/vendor/github.com/open-policy-agent/opa/ast/check.go b/vendor/github.com/open-policy-agent/opa/ast/check.go index 03d31123cf5..4cf00436df1 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/check.go +++ b/vendor/github.com/open-policy-agent/opa/ast/check.go @@ -5,1317 +5,18 @@ package ast import ( - "fmt" - "sort" - "strings" - - "github.com/open-policy-agent/opa/types" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/ast" ) -type varRewriter func(Ref) Ref - -// exprChecker defines the interface for executing type checking on a single -// expression. The exprChecker must update the provided TypeEnv with inferred -// types of vars. -type exprChecker func(*TypeEnv, *Expr) *Error - -// typeChecker implements type checking on queries and rules. Errors are -// accumulated on the typeChecker so that a single run can report multiple -// issues. -type typeChecker struct { - builtins map[string]*Builtin - required *Capabilities - errs Errors - exprCheckers map[string]exprChecker - varRewriter varRewriter - ss *SchemaSet - allowNet []string - input types.Type - allowUndefinedFuncs bool - schemaTypes map[string]types.Type -} - -// newTypeChecker returns a new typeChecker object that has no errors. -func newTypeChecker() *typeChecker { - return &typeChecker{ - builtins: make(map[string]*Builtin), - schemaTypes: make(map[string]types.Type), - exprCheckers: map[string]exprChecker{ - "eq": checkExprEq, - }, - } -} - -func (tc *typeChecker) newEnv(exist *TypeEnv) *TypeEnv { - if exist != nil { - return exist.wrap() - } - env := newTypeEnv(tc.copy) - if tc.input != nil { - env.tree.Put(InputRootRef, tc.input) - } - return env -} - -func (tc *typeChecker) copy() *typeChecker { - return newTypeChecker(). - WithVarRewriter(tc.varRewriter). - WithSchemaSet(tc.ss). - WithAllowNet(tc.allowNet). - WithInputType(tc.input). - WithAllowUndefinedFunctionCalls(tc.allowUndefinedFuncs). - WithBuiltins(tc.builtins). 
- WithRequiredCapabilities(tc.required) -} - -func (tc *typeChecker) WithRequiredCapabilities(c *Capabilities) *typeChecker { - tc.required = c - return tc -} - -func (tc *typeChecker) WithBuiltins(builtins map[string]*Builtin) *typeChecker { - tc.builtins = builtins - return tc -} - -func (tc *typeChecker) WithSchemaSet(ss *SchemaSet) *typeChecker { - tc.ss = ss - return tc -} - -func (tc *typeChecker) WithAllowNet(hosts []string) *typeChecker { - tc.allowNet = hosts - return tc -} - -func (tc *typeChecker) WithVarRewriter(f varRewriter) *typeChecker { - tc.varRewriter = f - return tc -} - -func (tc *typeChecker) WithInputType(tpe types.Type) *typeChecker { - tc.input = tpe - return tc -} - -// WithAllowUndefinedFunctionCalls sets the type checker to allow references to undefined functions. -// Additionally, the 'CheckUndefinedFuncs' and 'CheckSafetyRuleBodies' compiler stages are skipped. -func (tc *typeChecker) WithAllowUndefinedFunctionCalls(allow bool) *typeChecker { - tc.allowUndefinedFuncs = allow - return tc -} - -// Env returns a type environment for the specified built-ins with any other -// global types configured on the checker. In practice, this is the default -// environment that other statements will be checked against. -func (tc *typeChecker) Env(builtins map[string]*Builtin) *TypeEnv { - env := tc.newEnv(nil) - for _, bi := range builtins { - env.tree.Put(bi.Ref(), bi.Decl) - } - return env -} - -// CheckBody runs type checking on the body and returns a TypeEnv if no errors -// are found. The resulting TypeEnv wraps the provided one. The resulting -// TypeEnv will be able to resolve types of vars contained in the body. -func (tc *typeChecker) CheckBody(env *TypeEnv, body Body) (*TypeEnv, Errors) { - - errors := []*Error{} - env = tc.newEnv(env) - - WalkExprs(body, func(expr *Expr) bool { - - closureErrs := tc.checkClosures(env, expr) - for _, err := range closureErrs { - errors = append(errors, err) - } - - hasClosureErrors := len(closureErrs) > 0 - - vis := newRefChecker(env, tc.varRewriter) - NewGenericVisitor(vis.Visit).Walk(expr) - for _, err := range vis.errs { - errors = append(errors, err) - } - - hasRefErrors := len(vis.errs) > 0 - - if err := tc.checkExpr(env, expr); err != nil { - // Suppress this error if a more actionable one has occurred. In - // this case, if an error occurred in a ref or closure contained in - // this expression, and the error is due to a nil type, then it's - // likely to be the result of the more specific error. - skip := (hasClosureErrors || hasRefErrors) && causedByNilType(err) - if !skip { - errors = append(errors, err) - } - } - return true - }) - - tc.err(errors) - return env, errors -} - -// CheckTypes runs type checking on the rules and returns a TypeEnv if no errors -// are found. The resulting TypeEnv wraps the provided one. The resulting -// TypeEnv will be able to resolve types of refs that refer to rules.
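The type checker below is internal; a hedged sketch of reaching it through the public `ast.CompileModules` entry point, which surfaces the `rego_type_error` diagnostics these checks produce (v1 `ast` package assumed):

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	// count() expects a collection or string, so this module fails type checking.
	_, err := ast.CompileModules(map[string]string{
		"example.rego": "package example\n\np if { count(1) > 0 }\n",
	})
	fmt.Println(err) // rego_type_error: count: invalid argument(s) ...
}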
-func (tc *typeChecker) CheckTypes(env *TypeEnv, sorted []util.T, as *AnnotationSet) (*TypeEnv, Errors) { - env = tc.newEnv(env) - for _, s := range sorted { - tc.checkRule(env, as, s.(*Rule)) - } - tc.errs.Sort() - return env, tc.errs -} - -func (tc *typeChecker) checkClosures(env *TypeEnv, expr *Expr) Errors { - var result Errors - WalkClosures(expr, func(x interface{}) bool { - switch x := x.(type) { - case *ArrayComprehension: - _, errs := tc.copy().CheckBody(env, x.Body) - if len(errs) > 0 { - result = errs - return true - } - case *SetComprehension: - _, errs := tc.copy().CheckBody(env, x.Body) - if len(errs) > 0 { - result = errs - return true - } - case *ObjectComprehension: - _, errs := tc.copy().CheckBody(env, x.Body) - if len(errs) > 0 { - result = errs - return true - } - } - return false - }) - return result -} - -func (tc *typeChecker) getSchemaType(schemaAnnot *SchemaAnnotation, rule *Rule) (types.Type, *Error) { - if refType, exists := tc.schemaTypes[schemaAnnot.Schema.String()]; exists { - return refType, nil - } - - refType, err := processAnnotation(tc.ss, schemaAnnot, rule, tc.allowNet) - if err != nil { - return nil, err - } - - if refType == nil { - return nil, nil - } - - tc.schemaTypes[schemaAnnot.Schema.String()] = refType - return refType, nil - -} - -func (tc *typeChecker) checkRule(env *TypeEnv, as *AnnotationSet, rule *Rule) { - - env = env.wrap() - - schemaAnnots := getRuleAnnotation(as, rule) - for _, schemaAnnot := range schemaAnnots { - refType, err := tc.getSchemaType(schemaAnnot, rule) - if err != nil { - tc.err([]*Error{err}) - continue - } - - ref := schemaAnnot.Path - // if we do not have a ref or a reftype, we should not evaluate this rule. - if ref == nil || refType == nil { - continue - } - - prefixRef, t := getPrefix(env, ref) - if t == nil || len(prefixRef) == len(ref) { - env.tree.Put(ref, refType) - } else { - newType, err := override(ref[len(prefixRef):], t, refType, rule) - if err != nil { - tc.err([]*Error{err}) - continue - } - env.tree.Put(prefixRef, newType) - } - } - - cpy, err := tc.CheckBody(env, rule.Body) - env = env.next - path := rule.Ref() - - if len(err) > 0 { - // if the rule/function contains an error, add it to the type env so - // that expressions that refer to this rule/function do not encounter - // type errors. - env.tree.Put(path, types.A) - return - } - - var tpe types.Type - - if len(rule.Head.Args) > 0 { - // If args are not referred to in body, infer as any. - WalkVars(rule.Head.Args, func(v Var) bool { - if cpy.Get(v) == nil { - cpy.tree.PutOne(v, types.A) - } - return false - }) - - // Construct function type. - args := make([]types.Type, len(rule.Head.Args)) - for i := 0; i < len(rule.Head.Args); i++ { - args[i] = cpy.Get(rule.Head.Args[i]) - } - - f := types.NewFunction(args, cpy.Get(rule.Head.Value)) - - tpe = f - } else { - switch rule.Head.RuleKind() { - case SingleValue: - typeV := cpy.Get(rule.Head.Value) - if !path.IsGround() { - // e.g. 
store object[string: whatever] at data.p.q.r, not data.p.q.r[x] or data.p.q.r[x].y[z] - objPath := path.DynamicSuffix() - path = path.GroundPrefix() - - var err error - tpe, err = nestedObject(cpy, objPath, typeV) - if err != nil { - tc.err([]*Error{NewError(TypeErr, rule.Head.Location, err.Error())}) - tpe = nil - } - } else { - if typeV != nil { - tpe = typeV - } - } - case MultiValue: - typeK := cpy.Get(rule.Head.Key) - if typeK != nil { - tpe = types.NewSet(typeK) - } - } - } - - if tpe != nil { - env.tree.Insert(path, tpe, env) - } -} - -// nestedObject creates a nested structure of object types, where each term on path corresponds to a level in the -// nesting. Each term in the path only contributes to the dynamic portion of its corresponding object. -func nestedObject(env *TypeEnv, path Ref, tpe types.Type) (types.Type, error) { - if len(path) == 0 { - return tpe, nil - } - - k := path[0] - typeV, err := nestedObject(env, path[1:], tpe) - if err != nil { - return nil, err - } - if typeV == nil { - return nil, nil - } - - var dynamicProperty *types.DynamicProperty - typeK := env.Get(k) - if typeK == nil { - return nil, nil - } - dynamicProperty = types.NewDynamicProperty(typeK, typeV) - - return types.NewObject(nil, dynamicProperty), nil -} - -func (tc *typeChecker) checkExpr(env *TypeEnv, expr *Expr) *Error { - if err := tc.checkExprWith(env, expr, 0); err != nil { - return err - } - if !expr.IsCall() { - return nil - } - - operator := expr.Operator().String() - - // If the type checker wasn't provided with a required capabilities - // structure then just skip. In some cases, type checking might be run - // without the need to record what builtins are required. - if tc.required != nil { - if bi, ok := tc.builtins[operator]; ok { - tc.required.addBuiltinSorted(bi) - } - } - - checker := tc.exprCheckers[operator] - if checker != nil { - return checker(env, expr) - } - - return tc.checkExprBuiltin(env, expr) -} - -func (tc *typeChecker) checkExprBuiltin(env *TypeEnv, expr *Expr) *Error { - - args := expr.Operands() - pre := getArgTypes(env, args) - - // NOTE(tsandall): undefined functions will have been caught earlier in the - // compiler. We check for undefined functions before the safety check so - // that references to non-existent functions result in undefined function - // errors as opposed to unsafe var errors. - // - // We cannot run type checking before the safety check because part of the - // type checker relies on reordering (in particular for references to local - // vars). 
- name := expr.Operator() - tpe := env.Get(name) - - if tpe == nil { - if tc.allowUndefinedFuncs { - return nil - } - return NewError(TypeErr, expr.Location, "undefined function %v", name) - } - - // check if the expression refers to a function that contains an error - _, ok := tpe.(types.Any) - if ok { - return nil - } - - ftpe, ok := tpe.(*types.Function) - if !ok { - return NewError(TypeErr, expr.Location, "undefined function %v", name) - } - - fargs := ftpe.FuncArgs() - namedFargs := ftpe.NamedFuncArgs() - - if ftpe.Result() != nil { - fargs.Args = append(fargs.Args, ftpe.Result()) - namedFargs.Args = append(namedFargs.Args, ftpe.NamedResult()) - } - - if len(args) > len(fargs.Args) && fargs.Variadic == nil { - return newArgError(expr.Location, name, "too many arguments", pre, namedFargs) - } - - if len(args) < len(ftpe.FuncArgs().Args) { - return newArgError(expr.Location, name, "too few arguments", pre, namedFargs) - } - - for i := range args { - if !unify1(env, args[i], fargs.Arg(i), false) { - post := make([]types.Type, len(args)) - for i := range args { - post[i] = env.Get(args[i]) - } - return newArgError(expr.Location, name, "invalid argument(s)", post, namedFargs) - } - } - - return nil -} - -func checkExprEq(env *TypeEnv, expr *Expr) *Error { - - pre := getArgTypes(env, expr.Operands()) - exp := Equality.Decl.FuncArgs() - - if len(pre) < len(exp.Args) { - return newArgError(expr.Location, expr.Operator(), "too few arguments", pre, exp) - } - - if len(exp.Args) < len(pre) { - return newArgError(expr.Location, expr.Operator(), "too many arguments", pre, exp) - } - - a, b := expr.Operand(0), expr.Operand(1) - typeA, typeB := env.Get(a), env.Get(b) - - if !unify2(env, a, typeA, b, typeB) { - err := NewError(TypeErr, expr.Location, "match error") - err.Details = &UnificationErrDetail{ - Left: typeA, - Right: typeB, - } - return err - } - - return nil -} - -func (tc *typeChecker) checkExprWith(env *TypeEnv, expr *Expr, i int) *Error { - if i == len(expr.With) { - return nil - } - - target, value := expr.With[i].Target, expr.With[i].Value - targetType, valueType := env.Get(target), env.Get(value) - - if t, ok := targetType.(*types.Function); ok { // built-in function replacement - switch v := valueType.(type) { - case *types.Function: // ...by function - if !unifies(targetType, valueType) { - return newArgError(expr.With[i].Loc(), target.Value.(Ref), "arity mismatch", v.FuncArgs().Args, t.NamedFuncArgs()) - } - default: // ... 
by value, nothing to check - } - } - - return tc.checkExprWith(env, expr, i+1) -} - -func unify2(env *TypeEnv, a *Term, typeA types.Type, b *Term, typeB types.Type) bool { - - nilA := types.Nil(typeA) - nilB := types.Nil(typeB) - - if nilA && !nilB { - return unify1(env, a, typeB, false) - } else if nilB && !nilA { - return unify1(env, b, typeA, false) - } else if !nilA && !nilB { - return unifies(typeA, typeB) - } - - switch a.Value.(type) { - case *Array: - return unify2Array(env, a, b) - case *object: - return unify2Object(env, a, b) - case Var: - switch b.Value.(type) { - case Var: - return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false) - case *Array: - return unify2Array(env, b, a) - case *object: - return unify2Object(env, b, a) - } - } - - return false -} - -func unify2Array(env *TypeEnv, a *Term, b *Term) bool { - arr := a.Value.(*Array) - switch bv := b.Value.(type) { - case *Array: - if arr.Len() == bv.Len() { - for i := 0; i < arr.Len(); i++ { - if !unify2(env, arr.Elem(i), env.Get(arr.Elem(i)), bv.Elem(i), env.Get(bv.Elem(i))) { - return false - } - } - return true - } - case Var: - return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false) - } - return false -} - -func unify2Object(env *TypeEnv, a *Term, b *Term) bool { - obj := a.Value.(Object) - switch bv := b.Value.(type) { - case *object: - cv := obj.Intersect(bv) - if obj.Len() == bv.Len() && bv.Len() == len(cv) { - for i := range cv { - if !unify2(env, cv[i][1], env.Get(cv[i][1]), cv[i][2], env.Get(cv[i][2])) { - return false - } - } - return true - } - case Var: - return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false) - } - return false -} - -func unify1(env *TypeEnv, term *Term, tpe types.Type, union bool) bool { - switch v := term.Value.(type) { - case *Array: - switch tpe := tpe.(type) { - case *types.Array: - return unify1Array(env, v, tpe, union) - case types.Any: - if types.Compare(tpe, types.A) == 0 { - for i := 0; i < v.Len(); i++ { - unify1(env, v.Elem(i), types.A, true) - } - return true - } - unifies := false - for i := range tpe { - unifies = unify1(env, term, tpe[i], true) || unifies - } - return unifies - } - return false - case *object: - switch tpe := tpe.(type) { - case *types.Object: - return unify1Object(env, v, tpe, union) - case types.Any: - if types.Compare(tpe, types.A) == 0 { - v.Foreach(func(key, value *Term) { - unify1(env, key, types.A, true) - unify1(env, value, types.A, true) - }) - return true - } - unifies := false - for i := range tpe { - unifies = unify1(env, term, tpe[i], true) || unifies - } - return unifies - } - return false - case Set: - switch tpe := tpe.(type) { - case *types.Set: - return unify1Set(env, v, tpe, union) - case types.Any: - if types.Compare(tpe, types.A) == 0 { - v.Foreach(func(elem *Term) { - unify1(env, elem, types.A, true) - }) - return true - } - unifies := false - for i := range tpe { - unifies = unify1(env, term, tpe[i], true) || unifies - } - return unifies - } - return false - case Ref, *ArrayComprehension, *ObjectComprehension, *SetComprehension: - return unifies(env.Get(v), tpe) - case Var: - if !union { - if exist := env.Get(v); exist != nil { - return unifies(exist, tpe) - } - env.tree.PutOne(term.Value, tpe) - } else { - env.tree.PutOne(term.Value, types.Or(env.Get(v), tpe)) - } - return true - default: - if !IsConstant(v) { - panic("unreachable") - } - return unifies(env.Get(term), tpe) - } -} - -func unify1Array(env *TypeEnv, val *Array, tpe *types.Array, union bool) bool { - if val.Len() != 
tpe.Len() && tpe.Dynamic() == nil { - return false - } - for i := 0; i < val.Len(); i++ { - if !unify1(env, val.Elem(i), tpe.Select(i), union) { - return false - } - } - return true -} - -func unify1Object(env *TypeEnv, val Object, tpe *types.Object, union bool) bool { - if val.Len() != len(tpe.Keys()) && tpe.DynamicValue() == nil { - return false - } - stop := val.Until(func(k, v *Term) bool { - if IsConstant(k.Value) { - if child := selectConstant(tpe, k); child != nil { - if !unify1(env, v, child, union) { - return true - } - } else { - return true - } - } else { - // Inferring type of value under dynamic key would involve unioning - // with all property values of tpe whose keys unify. For now, type - // these values as Any. We can investigate stricter inference in - // the future. - unify1(env, v, types.A, union) - } - return false - }) - return !stop -} - -func unify1Set(env *TypeEnv, val Set, tpe *types.Set, union bool) bool { - of := types.Values(tpe) - return !val.Until(func(elem *Term) bool { - return !unify1(env, elem, of, union) - }) -} - -func (tc *typeChecker) err(errors []*Error) { - tc.errs = append(tc.errs, errors...) -} - -type refChecker struct { - env *TypeEnv - errs Errors - varRewriter varRewriter -} - -func rewriteVarsNop(node Ref) Ref { - return node -} - -func newRefChecker(env *TypeEnv, f varRewriter) *refChecker { - - if f == nil { - f = rewriteVarsNop - } - - return &refChecker{ - env: env, - errs: nil, - varRewriter: f, - } -} - -func (rc *refChecker) Visit(x interface{}) bool { - switch x := x.(type) { - case *ArrayComprehension, *ObjectComprehension, *SetComprehension: - return true - case *Expr: - switch terms := x.Terms.(type) { - case []*Term: - for i := 1; i < len(terms); i++ { - NewGenericVisitor(rc.Visit).Walk(terms[i]) - } - return true - case *Term: - NewGenericVisitor(rc.Visit).Walk(terms) - return true - } - case Ref: - if err := rc.checkApply(rc.env, x); err != nil { - rc.errs = append(rc.errs, err) - return true - } - if err := rc.checkRef(rc.env, rc.env.tree, x, 0); err != nil { - rc.errs = append(rc.errs, err) - } - } - return false -} - -func (rc *refChecker) checkApply(curr *TypeEnv, ref Ref) *Error { - switch tpe := curr.Get(ref).(type) { - case *types.Function: // NOTE(sr): We don't support first-class functions, except for `with`. - return newRefErrUnsupported(ref[0].Location, rc.varRewriter(ref), len(ref)-1, tpe) - } - - return nil -} - -func (rc *refChecker) checkRef(curr *TypeEnv, node *typeTreeNode, ref Ref, idx int) *Error { - - if idx == len(ref) { - return nil - } - - head := ref[idx] - - // NOTE(sr): as long as package statements are required, this isn't possible: - // the shortest possible rule ref is data.a.b (b is idx 2), idx 1 and 2 need to - // be strings or vars. 
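// Editorial aside (assumed layout, not in the original source): for a rule
// ref such as
//
//	data.pkg.rule[x]
//	(idx 0 = data, idx 1 = pkg, idx 2 = rule)
//
// only positions 1 and 2 are constrained below to be strings or vars; deeper
// positions are checked against the type tree.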
- if idx == 1 || idx == 2 { - switch head.Value.(type) { - case Var, String: // OK - default: - have := rc.env.Get(head.Value) - return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, have, types.S, getOneOfForNode(node)) - } - } - - if v, ok := head.Value.(Var); ok && idx != 0 { - tpe := types.Keys(rc.env.getRefRecExtent(node)) - if exist := rc.env.Get(v); exist != nil { - if !unifies(tpe, exist) { - return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, tpe, getOneOfForNode(node)) - } - } else { - rc.env.tree.PutOne(v, tpe) - } - } - - child := node.Child(head.Value) - if child == nil { - // NOTE(sr): idx is reset on purpose: we start over - switch { - case curr.next != nil: - next := curr.next - return rc.checkRef(next, next.tree, ref, 0) - - case RootDocumentNames.Contains(ref[0]): - if idx != 0 { - node.Children().Iter(func(_, child util.T) bool { - _ = rc.checkRef(curr, child.(*typeTreeNode), ref, idx+1) // ignore error - return false - }) - return nil - } - return rc.checkRefLeaf(types.A, ref, 1) - - default: - return rc.checkRefLeaf(types.A, ref, 0) - } - } - - if child.Leaf() { - return rc.checkRefLeaf(child.Value(), ref, idx+1) - } - - return rc.checkRef(curr, child, ref, idx+1) -} - -func (rc *refChecker) checkRefLeaf(tpe types.Type, ref Ref, idx int) *Error { - - if idx == len(ref) { - return nil - } - - head := ref[idx] - - keys := types.Keys(tpe) - if keys == nil { - return newRefErrUnsupported(ref[0].Location, rc.varRewriter(ref), idx-1, tpe) - } - - switch value := head.Value.(type) { - - case Var: - if exist := rc.env.Get(value); exist != nil { - if !unifies(exist, keys) { - return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, keys, getOneOfForType(tpe)) - } - } else { - rc.env.tree.PutOne(value, types.Keys(tpe)) - } - - case Ref: - if exist := rc.env.Get(value); exist != nil { - if !unifies(exist, keys) { - return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, keys, getOneOfForType(tpe)) - } - } - - case *Array, Object, Set: - if !unify1(rc.env, head, keys, false) { - return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, rc.env.Get(head), keys, nil) - } - - default: - child := selectConstant(tpe, head) - if child == nil { - return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, nil, types.Keys(tpe), getOneOfForType(tpe)) - } - return rc.checkRefLeaf(child, ref, idx+1) - } - - return rc.checkRefLeaf(types.Values(tpe), ref, idx+1) -} - -func unifies(a, b types.Type) bool { - - if a == nil || b == nil { - return false - } - - anyA, ok1 := a.(types.Any) - if ok1 { - if unifiesAny(anyA, b) { - return true - } - } - - anyB, ok2 := b.(types.Any) - if ok2 { - if unifiesAny(anyB, a) { - return true - } - } - - if ok1 || ok2 { - return false - } - - switch a := a.(type) { - case types.Null: - _, ok := b.(types.Null) - return ok - case types.Boolean: - _, ok := b.(types.Boolean) - return ok - case types.Number: - _, ok := b.(types.Number) - return ok - case types.String: - _, ok := b.(types.String) - return ok - case *types.Array: - b, ok := b.(*types.Array) - if !ok { - return false - } - return unifiesArrays(a, b) - case *types.Object: - b, ok := b.(*types.Object) - if !ok { - return false - } - return unifiesObjects(a, b) - case *types.Set: - b, ok := b.(*types.Set) - if !ok { - return false - } - return unifies(types.Values(a), types.Values(b)) - case *types.Function: - // NOTE(sr): variadic functions can only be internal ones, and we've forbidden - // their replacement via `with`; 
so we disregard variadic here - if types.Arity(a) == types.Arity(b) { - b := b.(*types.Function) - for i := range a.FuncArgs().Args { - if !unifies(a.FuncArgs().Arg(i), b.FuncArgs().Arg(i)) { - return false - } - } - return true - } - return false - default: - panic("unreachable") - } -} - -func unifiesAny(a types.Any, b types.Type) bool { - if _, ok := b.(*types.Function); ok { - return false - } - for i := range a { - if unifies(a[i], b) { - return true - } - } - return len(a) == 0 -} - -func unifiesArrays(a, b *types.Array) bool { - - if !unifiesArraysStatic(a, b) { - return false - } - - if !unifiesArraysStatic(b, a) { - return false - } - - return a.Dynamic() == nil || b.Dynamic() == nil || unifies(a.Dynamic(), b.Dynamic()) -} - -func unifiesArraysStatic(a, b *types.Array) bool { - if a.Len() != 0 { - for i := 0; i < a.Len(); i++ { - if !unifies(a.Select(i), b.Select(i)) { - return false - } - } - } - return true -} - -func unifiesObjects(a, b *types.Object) bool { - if !unifiesObjectsStatic(a, b) { - return false - } - - if !unifiesObjectsStatic(b, a) { - return false - } - - return a.DynamicValue() == nil || b.DynamicValue() == nil || unifies(a.DynamicValue(), b.DynamicValue()) -} - -func unifiesObjectsStatic(a, b *types.Object) bool { - for _, k := range a.Keys() { - if !unifies(a.Select(k), b.Select(k)) { - return false - } - } - return true -} - -// typeErrorCause defines an interface to determine the reason for a type -// error. The type error details implement this interface so that type checking -// can report more actionable errors. -type typeErrorCause interface { - nilType() bool -} - -func causedByNilType(err *Error) bool { - cause, ok := err.Details.(typeErrorCause) - if !ok { - return false - } - return cause.nilType() -} - -// ArgErrDetail represents a generic argument error. -type ArgErrDetail struct { - Have []types.Type `json:"have"` - Want types.FuncArgs `json:"want"` -} - -// Lines returns the string representation of the detail. -func (d *ArgErrDetail) Lines() []string { - lines := make([]string, 2) - lines[0] = "have: " + formatArgs(d.Have) - lines[1] = "want: " + fmt.Sprint(d.Want) - return lines -} - -func (d *ArgErrDetail) nilType() bool { - for i := range d.Have { - if types.Nil(d.Have[i]) { - return true - } - } - return false -} - // UnificationErrDetail describes a type mismatch error when two values are // unified (e.g., x = [1,2,y]). -type UnificationErrDetail struct { - Left types.Type `json:"a"` - Right types.Type `json:"b"` -} - -func (a *UnificationErrDetail) nilType() bool { - return types.Nil(a.Left) || types.Nil(a.Right) -} - -// Lines returns the string representation of the detail. -func (a *UnificationErrDetail) Lines() []string { - lines := make([]string, 2) - lines[0] = fmt.Sprint("left : ", types.Sprint(a.Left)) - lines[1] = fmt.Sprint("right : ", types.Sprint(a.Right)) - return lines -} +type UnificationErrDetail = v1.UnificationErrDetail // RefErrUnsupportedDetail describes an undefined reference error where the // referenced value does not support dereferencing (e.g., scalars). -type RefErrUnsupportedDetail struct { - Ref Ref `json:"ref"` // invalid ref - Pos int `json:"pos"` // invalid element - Have types.Type `json:"have"` // referenced type -} - -// Lines returns the string representation of the detail. 
-func (r *RefErrUnsupportedDetail) Lines() []string { - lines := []string{ - r.Ref.String(), - strings.Repeat("^", len(r.Ref[:r.Pos+1].String())), - fmt.Sprintf("have: %v", r.Have), - } - return lines -} +type RefErrUnsupportedDetail = v1.RefErrUnsupportedDetail // RefErrInvalidDetail describes an undefined reference error where the referenced // value does not support the reference operand (e.g., missing object key, // invalid key type, etc.) -type RefErrInvalidDetail struct { - Ref Ref `json:"ref"` // invalid ref - Pos int `json:"pos"` // invalid element - Have types.Type `json:"have,omitempty"` // type of invalid element (for var/ref elements) - Want types.Type `json:"want"` // allowed type (for non-object values) - OneOf []Value `json:"oneOf"` // allowed values (e.g., for object keys) -} - -// Lines returns the string representation of the detail. -func (r *RefErrInvalidDetail) Lines() []string { - lines := []string{r.Ref.String()} - offset := len(r.Ref[:r.Pos].String()) + 1 - pad := strings.Repeat(" ", offset) - lines = append(lines, fmt.Sprintf("%s^", pad)) - if r.Have != nil { - lines = append(lines, fmt.Sprintf("%shave (type): %v", pad, r.Have)) - } else { - lines = append(lines, fmt.Sprintf("%shave: %v", pad, r.Ref[r.Pos])) - } - if len(r.OneOf) > 0 { - lines = append(lines, fmt.Sprintf("%swant (one of): %v", pad, r.OneOf)) - } else { - lines = append(lines, fmt.Sprintf("%swant (type): %v", pad, r.Want)) - } - return lines -} - -func formatArgs(args []types.Type) string { - buf := make([]string, len(args)) - for i := range args { - buf[i] = types.Sprint(args[i]) - } - return "(" + strings.Join(buf, ", ") + ")" -} - -func newRefErrInvalid(loc *Location, ref Ref, idx int, have, want types.Type, oneOf []Value) *Error { - err := newRefError(loc, ref) - err.Details = &RefErrInvalidDetail{ - Ref: ref, - Pos: idx, - Have: have, - Want: want, - OneOf: oneOf, - } - return err -} - -func newRefErrUnsupported(loc *Location, ref Ref, idx int, have types.Type) *Error { - err := newRefError(loc, ref) - err.Details = &RefErrUnsupportedDetail{ - Ref: ref, - Pos: idx, - Have: have, - } - return err -} - -func newRefError(loc *Location, ref Ref) *Error { - return NewError(TypeErr, loc, "undefined ref: %v", ref) -} - -func newArgError(loc *Location, builtinName Ref, msg string, have []types.Type, want types.FuncArgs) *Error { - err := NewError(TypeErr, loc, "%v: %v", builtinName, msg) - err.Details = &ArgErrDetail{ - Have: have, - Want: want, - } - return err -} - -func getOneOfForNode(node *typeTreeNode) (result []Value) { - node.Children().Iter(func(k, _ util.T) bool { - result = append(result, k.(Value)) - return false - }) - - sortValueSlice(result) - return result -} - -func getOneOfForType(tpe types.Type) (result []Value) { - switch tpe := tpe.(type) { - case *types.Object: - for _, k := range tpe.Keys() { - v, err := InterfaceToValue(k) - if err != nil { - panic(err) - } - result = append(result, v) - } - - case types.Any: - for _, object := range tpe { - objRes := getOneOfForType(object) - result = append(result, objRes...) 
- } - } - - result = removeDuplicate(result) - sortValueSlice(result) - return result -} - -func sortValueSlice(sl []Value) { - sort.Slice(sl, func(i, j int) bool { - return sl[i].Compare(sl[j]) < 0 - }) -} - -func removeDuplicate(list []Value) []Value { - seen := make(map[Value]bool) - var newResult []Value - for _, item := range list { - if !seen[item] { - newResult = append(newResult, item) - seen[item] = true - } - } - return newResult -} - -func getArgTypes(env *TypeEnv, args []*Term) []types.Type { - pre := make([]types.Type, len(args)) - for i := range args { - pre[i] = env.Get(args[i]) - } - return pre -} - -// getPrefix returns the shortest prefix of ref that exists in env -func getPrefix(env *TypeEnv, ref Ref) (Ref, types.Type) { - if len(ref) == 1 { - t := env.Get(ref) - if t != nil { - return ref, t - } - } - for i := 1; i < len(ref); i++ { - t := env.Get(ref[:i]) - if t != nil { - return ref[:i], t - } - } - return nil, nil -} - -// override takes a type t and returns a type obtained from t where the path represented by ref within it has type o (overriding the original type of that path) -func override(ref Ref, t types.Type, o types.Type, rule *Rule) (types.Type, *Error) { - var newStaticProps []*types.StaticProperty - obj, ok := t.(*types.Object) - if !ok { - newType, err := getObjectType(ref, o, rule, types.NewDynamicProperty(types.A, types.A)) - if err != nil { - return nil, err - } - return newType, nil - } - found := false - if ok { - staticProps := obj.StaticProperties() - for _, prop := range staticProps { - valueCopy := prop.Value - key, err := InterfaceToValue(prop.Key) - if err != nil { - return nil, NewError(TypeErr, rule.Location, "unexpected error in override: %s", err.Error()) - } - if len(ref) > 0 && ref[0].Value.Compare(key) == 0 { - found = true - if len(ref) == 1 { - valueCopy = o - } else { - newVal, err := override(ref[1:], valueCopy, o, rule) - if err != nil { - return nil, err - } - valueCopy = newVal - } - } - newStaticProps = append(newStaticProps, types.NewStaticProperty(prop.Key, valueCopy)) - } - } - - // ref[0] is not a top-level key in staticProps, so it must be added - if !found { - newType, err := getObjectType(ref, o, rule, obj.DynamicProperties()) - if err != nil { - return nil, err - } - newStaticProps = append(newStaticProps, newType.StaticProperties()...) 
- } - return types.NewObject(newStaticProps, obj.DynamicProperties()), nil -} - -func getKeys(ref Ref, rule *Rule) ([]interface{}, *Error) { - keys := []interface{}{} - for _, refElem := range ref { - key, err := JSON(refElem.Value) - if err != nil { - return nil, NewError(TypeErr, rule.Location, "error getting key from value: %s", err.Error()) - } - keys = append(keys, key) - } - return keys, nil -} - -func getObjectTypeRec(keys []interface{}, o types.Type, d *types.DynamicProperty) *types.Object { - if len(keys) == 1 { - staticProps := []*types.StaticProperty{types.NewStaticProperty(keys[0], o)} - return types.NewObject(staticProps, d) - } - - staticProps := []*types.StaticProperty{types.NewStaticProperty(keys[0], getObjectTypeRec(keys[1:], o, d))} - return types.NewObject(staticProps, d) -} - -func getObjectType(ref Ref, o types.Type, rule *Rule, d *types.DynamicProperty) (*types.Object, *Error) { - keys, err := getKeys(ref, rule) - if err != nil { - return nil, err - } - return getObjectTypeRec(keys, o, d), nil -} - -func getRuleAnnotation(as *AnnotationSet, rule *Rule) (result []*SchemaAnnotation) { - - for _, x := range as.GetSubpackagesScope(rule.Module.Package.Path) { - result = append(result, x.Schemas...) - } - - if x := as.GetPackageScope(rule.Module.Package); x != nil { - result = append(result, x.Schemas...) - } - - if x := as.GetDocumentScope(rule.Ref().GroundPrefix()); x != nil { - result = append(result, x.Schemas...) - } - - for _, x := range as.GetRuleScope(rule) { - result = append(result, x.Schemas...) - } - - return result -} - -func processAnnotation(ss *SchemaSet, annot *SchemaAnnotation, rule *Rule, allowNet []string) (types.Type, *Error) { - - var schema interface{} - - if annot.Schema != nil { - if ss == nil { - return nil, nil - } - schema = ss.Get(annot.Schema) - if schema == nil { - return nil, NewError(TypeErr, rule.Location, "undefined schema: %v", annot.Schema) - } - } else if annot.Definition != nil { - schema = *annot.Definition - } - - tpe, err := loadSchema(schema, allowNet) - if err != nil { - return nil, NewError(TypeErr, rule.Location, err.Error()) - } - - return tpe, nil -} - -func errAnnotationRedeclared(a *Annotations, other *Location) *Error { - return NewError(TypeErr, a.Location, "%v annotation redeclared: %v", a.Scope, other) -} +type RefErrInvalidDetail = v1.RefErrInvalidDetail diff --git a/vendor/github.com/open-policy-agent/opa/ast/compare.go b/vendor/github.com/open-policy-agent/opa/ast/compare.go index 3bb6f2a75d6..d36078e3384 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/compare.go +++ b/vendor/github.com/open-policy-agent/opa/ast/compare.go @@ -5,9 +5,7 @@ package ast import ( - "encoding/json" - "fmt" - "math/big" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // Compare returns an integer indicating whether two AST values are less than, @@ -37,360 +35,5 @@ import ( // is empty. // Other comparisons are consistent but not defined. 
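// Editorial usage sketch (v0 semantics are preserved by the delegation to
// v1.Compare below): values of different categories order by category, and
// values of the same category compare structurally, e.g.
//
//	ast.Compare(ast.String("a"), ast.String("b"))                         // -1
//	ast.Compare(ast.MustParseTerm("[1, 2]"), ast.MustParseTerm("[1, 2]")) // 0
//	ast.Compare(ast.Boolean(true), ast.Number("1"))                       // -1 (Boolean sorts before Number)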
func Compare(a, b interface{}) int { - - if t, ok := a.(*Term); ok { - if t == nil { - a = nil - } else { - a = t.Value - } - } - - if t, ok := b.(*Term); ok { - if t == nil { - b = nil - } else { - b = t.Value - } - } - - if a == nil { - if b == nil { - return 0 - } - return -1 - } - if b == nil { - return 1 - } - - sortA := sortOrder(a) - sortB := sortOrder(b) - - if sortA < sortB { - return -1 - } else if sortB < sortA { - return 1 - } - - switch a := a.(type) { - case Null: - return 0 - case Boolean: - b := b.(Boolean) - if a.Equal(b) { - return 0 - } - if !a { - return -1 - } - return 1 - case Number: - if ai, err := json.Number(a).Int64(); err == nil { - if bi, err := json.Number(b.(Number)).Int64(); err == nil { - if ai == bi { - return 0 - } - if ai < bi { - return -1 - } - return 1 - } - } - - // We use big.Rat for comparing big numbers. - // It replaces big.Float due to following reason: - // big.Float comes with a default precision of 64, and setting a - // larger precision results in more memory being allocated - // (regardless of the actual number we are parsing with SetString). - // - // Note: If we're so close to zero that big.Float says we are zero, do - // *not* big.Rat).SetString on the original string it'll potentially - // take very long. - var bigA, bigB *big.Rat - fa, ok := new(big.Float).SetString(string(a)) - if !ok { - panic("illegal value") - } - if fa.IsInt() { - if i, _ := fa.Int64(); i == 0 { - bigA = new(big.Rat).SetInt64(0) - } - } - if bigA == nil { - bigA, ok = new(big.Rat).SetString(string(a)) - if !ok { - panic("illegal value") - } - } - - fb, ok := new(big.Float).SetString(string(b.(Number))) - if !ok { - panic("illegal value") - } - if fb.IsInt() { - if i, _ := fb.Int64(); i == 0 { - bigB = new(big.Rat).SetInt64(0) - } - } - if bigB == nil { - bigB, ok = new(big.Rat).SetString(string(b.(Number))) - if !ok { - panic("illegal value") - } - } - - return bigA.Cmp(bigB) - case String: - b := b.(String) - if a.Equal(b) { - return 0 - } - if a < b { - return -1 - } - return 1 - case Var: - b := b.(Var) - if a.Equal(b) { - return 0 - } - if a < b { - return -1 - } - return 1 - case Ref: - b := b.(Ref) - return termSliceCompare(a, b) - case *Array: - b := b.(*Array) - return termSliceCompare(a.elems, b.elems) - case *lazyObj: - return Compare(a.force(), b) - case *object: - if x, ok := b.(*lazyObj); ok { - b = x.force() - } - b := b.(*object) - return a.Compare(b) - case Set: - b := b.(Set) - return a.Compare(b) - case *ArrayComprehension: - b := b.(*ArrayComprehension) - if cmp := Compare(a.Term, b.Term); cmp != 0 { - return cmp - } - return Compare(a.Body, b.Body) - case *ObjectComprehension: - b := b.(*ObjectComprehension) - if cmp := Compare(a.Key, b.Key); cmp != 0 { - return cmp - } - if cmp := Compare(a.Value, b.Value); cmp != 0 { - return cmp - } - return Compare(a.Body, b.Body) - case *SetComprehension: - b := b.(*SetComprehension) - if cmp := Compare(a.Term, b.Term); cmp != 0 { - return cmp - } - return Compare(a.Body, b.Body) - case Call: - b := b.(Call) - return termSliceCompare(a, b) - case *Expr: - b := b.(*Expr) - return a.Compare(b) - case *SomeDecl: - b := b.(*SomeDecl) - return a.Compare(b) - case *Every: - b := b.(*Every) - return a.Compare(b) - case *With: - b := b.(*With) - return a.Compare(b) - case Body: - b := b.(Body) - return a.Compare(b) - case *Head: - b := b.(*Head) - return a.Compare(b) - case *Rule: - b := b.(*Rule) - return a.Compare(b) - case Args: - b := b.(Args) - return termSliceCompare(a, b) - case *Import: - b := b.(*Import) 
- return a.Compare(b) - case *Package: - b := b.(*Package) - return a.Compare(b) - case *Annotations: - b := b.(*Annotations) - return a.Compare(b) - case *Module: - b := b.(*Module) - return a.Compare(b) - } - panic(fmt.Sprintf("illegal value: %T", a)) -} - -type termSlice []*Term - -func (s termSlice) Less(i, j int) bool { return Compare(s[i].Value, s[j].Value) < 0 } -func (s termSlice) Swap(i, j int) { x := s[i]; s[i] = s[j]; s[j] = x } -func (s termSlice) Len() int { return len(s) } - -func sortOrder(x interface{}) int { - switch x.(type) { - case Null: - return 0 - case Boolean: - return 1 - case Number: - return 2 - case String: - return 3 - case Var: - return 4 - case Ref: - return 5 - case *Array: - return 6 - case Object: - return 7 - case Set: - return 8 - case *ArrayComprehension: - return 9 - case *ObjectComprehension: - return 10 - case *SetComprehension: - return 11 - case Call: - return 12 - case Args: - return 13 - case *Expr: - return 100 - case *SomeDecl: - return 101 - case *Every: - return 102 - case *With: - return 110 - case *Head: - return 120 - case Body: - return 200 - case *Rule: - return 1000 - case *Import: - return 1001 - case *Package: - return 1002 - case *Annotations: - return 1003 - case *Module: - return 10000 - } - panic(fmt.Sprintf("illegal value: %T", x)) -} - -func importsCompare(a, b []*Import) int { - minLen := len(a) - if len(b) < minLen { - minLen = len(b) - } - for i := 0; i < minLen; i++ { - if cmp := a[i].Compare(b[i]); cmp != 0 { - return cmp - } - } - if len(a) < len(b) { - return -1 - } - if len(b) < len(a) { - return 1 - } - return 0 -} - -func annotationsCompare(a, b []*Annotations) int { - minLen := len(a) - if len(b) < minLen { - minLen = len(b) - } - for i := 0; i < minLen; i++ { - if cmp := a[i].Compare(b[i]); cmp != 0 { - return cmp - } - } - if len(a) < len(b) { - return -1 - } - if len(b) < len(a) { - return 1 - } - return 0 -} - -func rulesCompare(a, b []*Rule) int { - minLen := len(a) - if len(b) < minLen { - minLen = len(b) - } - for i := 0; i < minLen; i++ { - if cmp := a[i].Compare(b[i]); cmp != 0 { - return cmp - } - } - if len(a) < len(b) { - return -1 - } - if len(b) < len(a) { - return 1 - } - return 0 -} - -func termSliceCompare(a, b []*Term) int { - minLen := len(a) - if len(b) < minLen { - minLen = len(b) - } - for i := 0; i < minLen; i++ { - if cmp := Compare(a[i], b[i]); cmp != 0 { - return cmp - } - } - if len(a) < len(b) { - return -1 - } else if len(b) < len(a) { - return 1 - } - return 0 -} - -func withSliceCompare(a, b []*With) int { - minLen := len(a) - if len(b) < minLen { - minLen = len(b) - } - for i := 0; i < minLen; i++ { - if cmp := Compare(a[i], b[i]); cmp != 0 { - return cmp - } - } - if len(a) < len(b) { - return -1 - } else if len(b) < len(a) { - return 1 - } - return 0 + return v1.Compare(a, b) } diff --git a/vendor/github.com/open-policy-agent/opa/ast/compile.go b/vendor/github.com/open-policy-agent/opa/ast/compile.go index 9025f862b2d..5a3daa910a7 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/compile.go +++ b/vendor/github.com/open-policy-agent/opa/ast/compile.go @@ -5,5882 +5,123 @@ package ast import ( - "errors" - "fmt" - "io" - "sort" - "strconv" - "strings" - - "github.com/open-policy-agent/opa/ast/location" - "github.com/open-policy-agent/opa/internal/debug" - "github.com/open-policy-agent/opa/internal/gojsonschema" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/types" - "github.com/open-policy-agent/opa/util" + v1 
"github.com/open-policy-agent/opa/v1/ast" ) // CompileErrorLimitDefault is the default number errors a compiler will allow before // exiting. const CompileErrorLimitDefault = 10 -var errLimitReached = NewError(CompileErr, nil, "error limit reached") - // Compiler contains the state of a compilation process. -type Compiler struct { - - // Errors contains errors that occurred during the compilation process. - // If there are one or more errors, the compilation process is considered - // "failed". - Errors Errors - - // Modules contains the compiled modules. The compiled modules are the - // output of the compilation process. If the compilation process failed, - // there is no guarantee about the state of the modules. - Modules map[string]*Module - - // ModuleTree organizes the modules into a tree where each node is keyed by - // an element in the module's package path. E.g., given modules containing - // the following package directives: "a", "a.b", "a.c", and "a.b", the - // resulting module tree would be: - // - // root - // | - // +--- data (no modules) - // | - // +--- a (1 module) - // | - // +--- b (2 modules) - // | - // +--- c (1 module) - // - ModuleTree *ModuleTreeNode - - // RuleTree organizes rules into a tree where each node is keyed by an - // element in the rule's path. The rule path is the concatenation of the - // containing package and the stringified rule name. E.g., given the - // following module: - // - // package ex - // p[1] { true } - // p[2] { true } - // q = true - // a.b.c = 3 - // - // root - // | - // +--- data (no rules) - // | - // +--- ex (no rules) - // | - // +--- p (2 rules) - // | - // +--- q (1 rule) - // | - // +--- a - // | - // +--- b - // | - // +--- c (1 rule) - // - // Another example with general refs containing vars at arbitrary locations: - // - // package ex - // a.b[x].d { x := "c" } # R1 - // a.b.c[x] { x := "d" } # R2 - // a.b[x][y] { x := "c"; y := "d" } # R3 - // p := true # R4 - // - // root - // | - // +--- data (no rules) - // | - // +--- ex (no rules) - // | - // +--- a - // | | - // | +--- b (R1, R3) - // | | - // | +--- c (R2) - // | - // +--- p (R4) - RuleTree *TreeNode - - // Graph contains dependencies between rules. An edge (u,v) is added to the - // graph if rule 'u' refers to the virtual document defined by 'v'. - Graph *Graph - - // TypeEnv holds type information for values inferred by the compiler. - TypeEnv *TypeEnv - - // RewrittenVars is a mapping of variables that have been rewritten - // with the key being the generated name and value being the original. - RewrittenVars map[Var]Var - - // Capabliities required by the modules that were compiled. 
- Required *Capabilities - - localvargen *localVarGenerator - moduleLoader ModuleLoader - ruleIndices *util.HashMap - stages []stage - maxErrs int - sorted []string // list of sorted module names - pathExists func([]string) (bool, error) - after map[string][]CompilerStageDefinition - metrics metrics.Metrics - capabilities *Capabilities // user-supplied capabilities - imports map[string][]*Import // saved imports from stripping - builtins map[string]*Builtin // universe of built-in functions - customBuiltins map[string]*Builtin // user-supplied custom built-in functions (deprecated: use capabilities) - unsafeBuiltinsMap map[string]struct{} // user-supplied set of unsafe built-in functions to block (deprecated: use capabilities) - deprecatedBuiltinsMap map[string]struct{} // set of deprecated, but not removed, built-in functions - enablePrintStatements bool // indicates if print statements should be elided (default) - comprehensionIndices map[*Term]*ComprehensionIndex // comprehension key index - initialized bool // indicates if init() has been called - debug debug.Debug // emits debug information produced during compilation - schemaSet *SchemaSet // user-supplied schemas for input and data documents - inputType types.Type // global input type retrieved from schema set - annotationSet *AnnotationSet // hierarchical set of annotations - strict bool // enforce strict compilation checks - keepModules bool // whether to keep the unprocessed, parsed modules (below) - parsedModules map[string]*Module // parsed, but otherwise unprocessed modules, kept track of when keepModules is true - useTypeCheckAnnotations bool // whether to provide annotated information (schemas) to the type checker - allowUndefinedFuncCalls bool // don't error on calls to unknown functions. - evalMode CompilerEvalMode // - rewriteTestRulesForTracing bool // rewrite test rules to capture dynamic values for tracing. -} +type Compiler = v1.Compiler // CompilerStage defines the interface for stages in the compiler. -type CompilerStage func(*Compiler) *Error +type CompilerStage = v1.CompilerStage // CompilerEvalMode allows toggling certain stages that are only // needed for certain modes. Concretely, only "topdown" mode will // have the compiler build comprehension and rule indices. -type CompilerEvalMode int +type CompilerEvalMode = v1.CompilerEvalMode const ( // EvalModeTopdown (default) instructs the compiler to build rule // and comprehension indices used by topdown evaluation. - EvalModeTopdown CompilerEvalMode = iota + EvalModeTopdown = v1.EvalModeTopdown // EvalModeIR makes the compiler skip the stages for comprehension // and rule indices. - EvalModeIR + EvalModeIR = v1.EvalModeIR ) // CompilerStageDefinition defines a compiler stage -type CompilerStageDefinition struct { - Name string - MetricName string - Stage CompilerStage -} +type CompilerStageDefinition = v1.CompilerStageDefinition // RulesOptions defines the options for retrieving rules by Ref from the // compiler. -type RulesOptions struct { - // IncludeHiddenModules determines if the result contains hidden modules, - // currently only the "system" namespace, i.e. "data.system.*". - IncludeHiddenModules bool -} +type RulesOptions = v1.RulesOptions // QueryContext contains contextual information for running an ad-hoc query. // // Ad-hoc queries can be run in the context of a package and imports may be // included to provide concise access to data.
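// Editorial usage sketch (hypothetical policy names; the aliased v1 type
// behaves identically): a QueryContext supplies the package and imports under
// which an ad-hoc query is compiled:
//
//	qctx := ast.NewQueryContext().
//		WithPackage(ast.MustParsePackage("package example")).
//		WithImports(ast.MustParseImports("import data.servers"))
//	compiled, err := compiler.QueryCompiler().
//		WithContext(qctx).
//		Compile(ast.MustParseBody("servers[i].port == 80"))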
-type QueryContext struct { - Package *Package - Imports []*Import -} +type QueryContext = v1.QueryContext // NewQueryContext returns a new QueryContext object. func NewQueryContext() *QueryContext { - return &QueryContext{} -} - -// WithPackage sets the pkg on qc. -func (qc *QueryContext) WithPackage(pkg *Package) *QueryContext { - if qc == nil { - qc = NewQueryContext() - } - qc.Package = pkg - return qc -} - -// WithImports sets the imports on qc. -func (qc *QueryContext) WithImports(imports []*Import) *QueryContext { - if qc == nil { - qc = NewQueryContext() - } - qc.Imports = imports - return qc -} - -// Copy returns a deep copy of qc. -func (qc *QueryContext) Copy() *QueryContext { - if qc == nil { - return nil - } - cpy := *qc - if cpy.Package != nil { - cpy.Package = qc.Package.Copy() - } - cpy.Imports = make([]*Import, len(qc.Imports)) - for i := range qc.Imports { - cpy.Imports[i] = qc.Imports[i].Copy() - } - return &cpy + return v1.NewQueryContext() } // QueryCompiler defines the interface for compiling ad-hoc queries. -type QueryCompiler interface { - - // Compile should be called to compile ad-hoc queries. The return value is - // the compiled version of the query. - Compile(q Body) (Body, error) - - // TypeEnv returns the type environment built after running type checking - // on the query. - TypeEnv() *TypeEnv - - // WithContext sets the QueryContext on the QueryCompiler. Subsequent calls - // to Compile will take the QueryContext into account. - WithContext(qctx *QueryContext) QueryCompiler - - // WithEnablePrintStatements enables print statements in queries compiled - // with the QueryCompiler. - WithEnablePrintStatements(yes bool) QueryCompiler - - // WithUnsafeBuiltins sets the built-in functions to treat as unsafe and not - // allow inside of queries. By default the query compiler inherits the - // compiler's unsafe built-in functions. This function allows callers to - // override that set. If an empty (non-nil) map is provided, all built-ins - // are allowed. - WithUnsafeBuiltins(unsafe map[string]struct{}) QueryCompiler - - // WithStageAfter registers a stage to run during query compilation after - // the named stage. - WithStageAfter(after string, stage QueryCompilerStageDefinition) QueryCompiler - - // RewrittenVars maps generated vars in the compiled query to vars from the - // parsed query. For example, given the query "input := 1" the rewritten - // query would be "__local0__ = 1". The mapping would then be {__local0__: input}. - RewrittenVars() map[Var]Var - - // ComprehensionIndex returns an index data structure for the given comprehension - // term. If no index is found, returns nil. - ComprehensionIndex(term *Term) *ComprehensionIndex - - // WithStrict enables strict mode for the query compiler. - WithStrict(strict bool) QueryCompiler -} +type QueryCompiler = v1.QueryCompiler // QueryCompilerStage defines the interface for stages in the query compiler. -type QueryCompilerStage func(QueryCompiler, Body) (Body, error) +type QueryCompilerStage = v1.QueryCompilerStage // QueryCompilerStageDefinition defines a QueryCompiler stage -type QueryCompilerStageDefinition struct { - Name string - MetricName string - Stage QueryCompilerStage -} - -type stage struct { - name string - metricName string - f func() -} +type QueryCompilerStageDefinition = v1.QueryCompilerStageDefinition // NewCompiler returns a new empty compiler. 
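// Editorial usage sketch: the entry point is unchanged by this patch, e.g.
//
//	c := ast.NewCompiler()
//	c.Compile(map[string]*ast.Module{"example.rego": mod})
//	if c.Failed() {
//		// inspect c.Errors
//	}
//
// After this change, NewCompiler simply returns the v1 compiler configured
// with this package's DefaultRegoVersion (visible further down in this hunk).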
func NewCompiler() *Compiler { - - c := &Compiler{ - Modules: map[string]*Module{}, - RewrittenVars: map[Var]Var{}, - Required: &Capabilities{}, - ruleIndices: util.NewHashMap(func(a, b util.T) bool { - r1, r2 := a.(Ref), b.(Ref) - return r1.Equal(r2) - }, func(x util.T) int { - return x.(Ref).Hash() - }), - maxErrs: CompileErrorLimitDefault, - after: map[string][]CompilerStageDefinition{}, - unsafeBuiltinsMap: map[string]struct{}{}, - deprecatedBuiltinsMap: map[string]struct{}{}, - comprehensionIndices: map[*Term]*ComprehensionIndex{}, - debug: debug.Discard(), - } - - c.ModuleTree = NewModuleTree(nil) - c.RuleTree = NewRuleTree(c.ModuleTree) - - c.stages = []stage{ - // Reference resolution should run first as it may be used to lazily - // load additional modules. If any stages run before resolution, they - // need to be re-run after resolution. - {"ResolveRefs", "compile_stage_resolve_refs", c.resolveAllRefs}, - // The local variable generator must be initialized after references are - // resolved and the dynamic module loader has run but before subsequent - // stages that need to generate variables. - {"InitLocalVarGen", "compile_stage_init_local_var_gen", c.initLocalVarGen}, - {"RewriteRuleHeadRefs", "compile_stage_rewrite_rule_head_refs", c.rewriteRuleHeadRefs}, - {"CheckKeywordOverrides", "compile_stage_check_keyword_overrides", c.checkKeywordOverrides}, - {"CheckDuplicateImports", "compile_stage_check_duplicate_imports", c.checkDuplicateImports}, - {"RemoveImports", "compile_stage_remove_imports", c.removeImports}, - {"SetModuleTree", "compile_stage_set_module_tree", c.setModuleTree}, - {"SetRuleTree", "compile_stage_set_rule_tree", c.setRuleTree}, // depends on RewriteRuleHeadRefs - {"RewriteLocalVars", "compile_stage_rewrite_local_vars", c.rewriteLocalVars}, - {"CheckVoidCalls", "compile_stage_check_void_calls", c.checkVoidCalls}, - {"RewritePrintCalls", "compile_stage_rewrite_print_calls", c.rewritePrintCalls}, - {"RewriteExprTerms", "compile_stage_rewrite_expr_terms", c.rewriteExprTerms}, - {"ParseMetadataBlocks", "compile_stage_parse_metadata_blocks", c.parseMetadataBlocks}, - {"SetAnnotationSet", "compile_stage_set_annotationset", c.setAnnotationSet}, - {"RewriteRegoMetadataCalls", "compile_stage_rewrite_rego_metadata_calls", c.rewriteRegoMetadataCalls}, - {"SetGraph", "compile_stage_set_graph", c.setGraph}, - {"RewriteComprehensionTerms", "compile_stage_rewrite_comprehension_terms", c.rewriteComprehensionTerms}, - {"RewriteRefsInHead", "compile_stage_rewrite_refs_in_head", c.rewriteRefsInHead}, - {"RewriteWithValues", "compile_stage_rewrite_with_values", c.rewriteWithModifiers}, - {"CheckRuleConflicts", "compile_stage_check_rule_conflicts", c.checkRuleConflicts}, - {"CheckUndefinedFuncs", "compile_stage_check_undefined_funcs", c.checkUndefinedFuncs}, - {"CheckSafetyRuleHeads", "compile_stage_check_safety_rule_heads", c.checkSafetyRuleHeads}, - {"CheckSafetyRuleBodies", "compile_stage_check_safety_rule_bodies", c.checkSafetyRuleBodies}, - {"RewriteEquals", "compile_stage_rewrite_equals", c.rewriteEquals}, - {"RewriteDynamicTerms", "compile_stage_rewrite_dynamic_terms", c.rewriteDynamicTerms}, - {"RewriteTestRulesForTracing", "compile_stage_rewrite_test_rules_for_tracing", c.rewriteTestRuleEqualities}, // must run after RewriteDynamicTerms - {"CheckRecursion", "compile_stage_check_recursion", c.checkRecursion}, - {"CheckTypes", "compile_stage_check_types", c.checkTypes}, // must be run after CheckRecursion - {"CheckUnsafeBuiltins", "compile_state_check_unsafe_builtins", 
c.checkUnsafeBuiltins}, - {"CheckDeprecatedBuiltins", "compile_state_check_deprecated_builtins", c.checkDeprecatedBuiltins}, - {"BuildRuleIndices", "compile_stage_rebuild_indices", c.buildRuleIndices}, - {"BuildComprehensionIndices", "compile_stage_rebuild_comprehension_indices", c.buildComprehensionIndices}, - {"BuildRequiredCapabilities", "compile_stage_build_required_capabilities", c.buildRequiredCapabilities}, - } - - return c -} - -// SetErrorLimit sets the number of errors the compiler can encounter before it -// quits. Zero or a negative number indicates no limit. -func (c *Compiler) SetErrorLimit(limit int) *Compiler { - c.maxErrs = limit - return c -} - -// WithEnablePrintStatements enables print statements inside of modules compiled -// by the compiler. If print statements are not enabled, calls to print() are -// erased at compile-time. -func (c *Compiler) WithEnablePrintStatements(yes bool) *Compiler { - c.enablePrintStatements = yes - return c -} - -// WithPathConflictsCheck enables base-virtual document conflict -// detection. The compiler will check that rules don't overlap with -// paths that exist as determined by the provided callable. -func (c *Compiler) WithPathConflictsCheck(fn func([]string) (bool, error)) *Compiler { - c.pathExists = fn - return c -} - -// WithStageAfter registers a stage to run during compilation after -// the named stage. -func (c *Compiler) WithStageAfter(after string, stage CompilerStageDefinition) *Compiler { - c.after[after] = append(c.after[after], stage) - return c -} - -// WithMetrics will set a metrics.Metrics to be used for profiling -// the Compiler instance. -func (c *Compiler) WithMetrics(metrics metrics.Metrics) *Compiler { - c.metrics = metrics - return c -} - -// WithCapabilities sets capabilities to enable during compilation. Capabilities allow the caller -// to specify the set of built-in functions available to the policy. In the future, capabilities -// may be able to restrict access to other language features. Capabilities allow callers to check -// if policies are compatible with a particular version of OPA. If policies are compiled for a -// specific version of OPA, there is no guarantee that _this_ version of OPA can evaluate them -// successfully. -func (c *Compiler) WithCapabilities(capabilities *Capabilities) *Compiler { - c.capabilities = capabilities - return c -} - -// Capabilities returns the capabilities enabled during compilation. -func (c *Compiler) Capabilities() *Capabilities { - return c.capabilities -} - -// WithDebug sets where debug messages are written to. Passing `nil` has no -// effect. -func (c *Compiler) WithDebug(sink io.Writer) *Compiler { - if sink != nil { - c.debug = debug.New(sink) - } - return c -} - -// WithBuiltins is deprecated. Use WithCapabilities instead. -func (c *Compiler) WithBuiltins(builtins map[string]*Builtin) *Compiler { - c.customBuiltins = make(map[string]*Builtin) - for k, v := range builtins { - c.customBuiltins[k] = v - } - return c -} - -// WithUnsafeBuiltins is deprecated. Use WithCapabilities instead. -func (c *Compiler) WithUnsafeBuiltins(unsafeBuiltins map[string]struct{}) *Compiler { - for name := range unsafeBuiltins { - c.unsafeBuiltinsMap[name] = struct{}{} - } - return c -} - -// WithStrict enables strict mode in the compiler. -func (c *Compiler) WithStrict(strict bool) *Compiler { - c.strict = strict - return c -} - -// WithKeepModules enables retaining unprocessed modules in the compiler.
-// Note that the modules aren't copied on the way in or out -- so when -// accessing them via ParsedModules(), mutations will occur in the module -// map that was passed into Compile(). -func (c *Compiler) WithKeepModules(y bool) *Compiler { - c.keepModules = y - return c -} - -// WithUseTypeCheckAnnotations uses schema annotations during type checking -func (c *Compiler) WithUseTypeCheckAnnotations(enabled bool) *Compiler { - c.useTypeCheckAnnotations = enabled - return c -} - -func (c *Compiler) WithAllowUndefinedFunctionCalls(allow bool) *Compiler { - c.allowUndefinedFuncCalls = allow - return c -} - -// WithEvalMode allows setting the CompilerEvalMode of the compiler -func (c *Compiler) WithEvalMode(e CompilerEvalMode) *Compiler { - c.evalMode = e - return c -} - -// WithRewriteTestRules enables rewriting test rules to capture dynamic values in local variables, -// so they can be accessed by tracing. -func (c *Compiler) WithRewriteTestRules(rewrite bool) *Compiler { - c.rewriteTestRulesForTracing = rewrite - return c -} - -// ParsedModules returns the parsed, unprocessed modules from the compiler. -// It is `nil` if keeping modules wasn't enabled via `WithKeepModules(true)`. -// The map includes all modules loaded via the ModuleLoader, if one was used. -func (c *Compiler) ParsedModules() map[string]*Module { - return c.parsedModules -} - -func (c *Compiler) QueryCompiler() QueryCompiler { - c.init() - c0 := *c - return newQueryCompiler(&c0) -} - -// Compile runs the compilation process on the input modules. The compiled -// version of the modules and associated data structures are stored on the -// compiler. If the compilation process fails for any reason, the compiler will -// contain a slice of errors. -func (c *Compiler) Compile(modules map[string]*Module) { - - c.init() - - c.Modules = make(map[string]*Module, len(modules)) - c.sorted = make([]string, 0, len(modules)) - - if c.keepModules { - c.parsedModules = make(map[string]*Module, len(modules)) - } else { - c.parsedModules = nil - } - - for k, v := range modules { - c.Modules[k] = v.Copy() - c.sorted = append(c.sorted, k) - if c.parsedModules != nil { - c.parsedModules[k] = v - } - } - - sort.Strings(c.sorted) - - c.compile() -} - -// WithSchemas sets a schemaSet on the compiler -func (c *Compiler) WithSchemas(schemas *SchemaSet) *Compiler { - c.schemaSet = schemas - return c -} - -// Failed returns true if a compilation error has been encountered. -func (c *Compiler) Failed() bool { - return len(c.Errors) > 0 -} - -// ComprehensionIndex returns a data structure specifying how to index comprehension -// results so that callers do not have to recompute the comprehension more than once. -// If no index is found, returns nil. -func (c *Compiler) ComprehensionIndex(term *Term) *ComprehensionIndex { - return c.comprehensionIndices[term] -} - -// GetArity returns the number of args a function referred to by ref takes. If -// ref refers to a built-in function, the built-in declaration is consulted, -// otherwise, the ref is used to perform a ruleset lookup. -func (c *Compiler) GetArity(ref Ref) int { - if bi := c.builtins[ref.String()]; bi != nil { - return len(bi.Decl.FuncArgs().Args) - } - rules := c.GetRulesExact(ref) - if len(rules) == 0 { - return -1 - } - return len(rules[0].Head.Args) -} - -// GetRulesExact returns a slice of rules referred to by the reference. -// -// E.g., given the following module: -// -// package a.b.c -// -// p[k] = v { ... } # rule1 -// p[k1] = v1 { ...
} # rule2 -// -// The following calls yield the rules on the right. -// -// GetRulesExact("data.a.b.c.p") => [rule1, rule2] -// GetRulesExact("data.a.b.c.p.x") => nil -// GetRulesExact("data.a.b.c") => nil -func (c *Compiler) GetRulesExact(ref Ref) (rules []*Rule) { - node := c.RuleTree - - for _, x := range ref { - if node = node.Child(x.Value); node == nil { - return nil - } - } - - return extractRules(node.Values) -} - -// GetRulesForVirtualDocument returns a slice of rules that produce the virtual -// document referred to by the reference. -// -// E.g., given the following module: -// -// package a.b.c -// -// p[k] = v { ... } # rule1 -// p[k1] = v1 { ... } # rule2 -// -// The following calls yield the rules on the right. -// -// GetRulesForVirtualDocument("data.a.b.c.p") => [rule1, rule2] -// GetRulesForVirtualDocument("data.a.b.c.p.x") => [rule1, rule2] -// GetRulesForVirtualDocument("data.a.b.c") => nil -func (c *Compiler) GetRulesForVirtualDocument(ref Ref) (rules []*Rule) { - - node := c.RuleTree - - for _, x := range ref { - if node = node.Child(x.Value); node == nil { - return nil - } - if len(node.Values) > 0 { - return extractRules(node.Values) - } - } - - return extractRules(node.Values) -} - -// GetRulesWithPrefix returns a slice of rules that share the prefix ref. -// -// E.g., given the following module: -// -// package a.b.c -// -// p[x] = y { ... } # rule1 -// p[k] = v { ... } # rule2 -// q { ... } # rule3 -// -// The following calls yield the rules on the right. -// -// GetRulesWithPrefix("data.a.b.c.p") => [rule1, rule2] -// GetRulesWithPrefix("data.a.b.c.p.a") => nil -// GetRulesWithPrefix("data.a.b.c") => [rule1, rule2, rule3] -func (c *Compiler) GetRulesWithPrefix(ref Ref) (rules []*Rule) { - - node := c.RuleTree - - for _, x := range ref { - if node = node.Child(x.Value); node == nil { - return nil - } - } - - var acc func(node *TreeNode) - - acc = func(node *TreeNode) { - rules = append(rules, extractRules(node.Values)...) - for _, child := range node.Children { - if child.Hide { - continue - } - acc(child) - } - } - - acc(node) - - return rules -} - -func extractRules(s []util.T) []*Rule { - rules := make([]*Rule, len(s)) - for i := range s { - rules[i] = s[i].(*Rule) - } - return rules -} - -// GetRules returns a slice of rules that are referred to by ref. -// -// E.g., given the following module: -// -// package a.b.c -// -// p[x] = y { q[x] = y; ... } # rule1 -// q[x] = y { ... } # rule2 -// -// The following calls yield the rules on the right. -// -// GetRules("data.a.b.c.p") => [rule1] -// GetRules("data.a.b.c.p.x") => [rule1] -// GetRules("data.a.b.c.q") => [rule2] -// GetRules("data.a.b.c") => [rule1, rule2] -// GetRules("data.a.b.d") => nil -func (c *Compiler) GetRules(ref Ref) (rules []*Rule) { - - set := map[*Rule]struct{}{} - - for _, rule := range c.GetRulesForVirtualDocument(ref) { - set[rule] = struct{}{} - } - - for _, rule := range c.GetRulesWithPrefix(ref) { - set[rule] = struct{}{} - } - - for rule := range set { - rules = append(rules, rule) - } - - return rules -} - -// GetRulesDynamic returns a slice of rules that could be referred to by a ref. -// -// Deprecated: use GetRulesDynamicWithOpts -func (c *Compiler) GetRulesDynamic(ref Ref) []*Rule { - return c.GetRulesDynamicWithOpts(ref, RulesOptions{}) -} - -// GetRulesDynamicWithOpts returns a slice of rules that could be referred to by -// a ref. 
-// When parts of the ref are statically known, we use that information to narrow -// down which rules the ref could refer to, but in the most general case this -// will be an over-approximation. -// -// E.g., given the following modules: -// -// package a.b.c -// -// r1 = 1 # rule1 -// -// and: -// -// package a.d.c -// -// r2 = 2 # rule2 -// -// The following calls yield the rules on the right. -// -// GetRulesDynamicWithOpts("data.a[x].c[y]", opts) => [rule1, rule2] -// GetRulesDynamicWithOpts("data.a[x].c.r2", opts) => [rule2] -// GetRulesDynamicWithOpts("data.a.b[x][y]", opts) => [rule1] -// -// Using the RulesOptions parameter, the inclusion of hidden modules can be -// controlled: -// -// With -// -// package system.main -// -// r3 = 3 # rule3 -// -// We'd get this result: -// -// GetRulesDynamicWithOpts("data[x]", RulesOptions{IncludeHiddenModules: true}) => [rule1, rule2, rule3] -// -// Without the options, it would be excluded. -func (c *Compiler) GetRulesDynamicWithOpts(ref Ref, opts RulesOptions) []*Rule { - node := c.RuleTree - - set := map[*Rule]struct{}{} - var walk func(node *TreeNode, i int) - walk = func(node *TreeNode, i int) { - switch { - case i >= len(ref): - // We've reached the end of the reference and want to collect everything - // under this "prefix". - node.DepthFirst(func(descendant *TreeNode) bool { - insertRules(set, descendant.Values) - if opts.IncludeHiddenModules { - return false - } - return descendant.Hide - }) - - case i == 0 || IsConstant(ref[i].Value): - // The head of the ref is always grounded. In case another part of the - // ref is also grounded, we can lookup the exact child. If it's not found - // we can immediately return... - if child := node.Child(ref[i].Value); child != nil { - if len(child.Values) > 0 { - // Add any rules at this position - insertRules(set, child.Values) - } - // There might still be "sub-rules" contributing key-value "overrides" for e.g. partial object rules, continue walking - walk(child, i+1) - } else { - return - } - - default: - // This part of the ref is a dynamic term. We can't know what it refers - // to and will just need to try all of the children. - for _, child := range node.Children { - if child.Hide && !opts.IncludeHiddenModules { - continue - } - insertRules(set, child.Values) - walk(child, i+1) - } - } - } - - walk(node, 0) - rules := make([]*Rule, 0, len(set)) - for rule := range set { - rules = append(rules, rule) - } - return rules -} - -// Utility: add all rule values to the set. -func insertRules(set map[*Rule]struct{}, rules []util.T) { - for _, rule := range rules { - set[rule.(*Rule)] = struct{}{} - } -} - -// RuleIndex returns a RuleIndex built for the rule set referred to by path. -// The path must refer to the rule set exactly, i.e., given a rule set at path -// data.a.b.c.p, refs data.a.b.c.p.x and data.a.b.c would not return a -// RuleIndex built for the rule. 
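// Editorial sketch mirroring the exact-match rule above (assuming an index
// was built for the ruleset at data.a.b.c.p):
//
//	c.RuleIndex(ast.MustParseRef("data.a.b.c.p"))   // => the RuleIndex
//	c.RuleIndex(ast.MustParseRef("data.a.b.c.p.x")) // => nil
//	c.RuleIndex(ast.MustParseRef("data.a.b.c"))     // => nil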
-
-// RuleIndex returns a RuleIndex built for the rule set referred to by path.
-// The path must refer to the rule set exactly, i.e., given a rule set at path
-// data.a.b.c.p, refs data.a.b.c.p.x and data.a.b.c would not return a
-// RuleIndex built for the rule.
-func (c *Compiler) RuleIndex(path Ref) RuleIndex {
-	r, ok := c.ruleIndices.Get(path)
-	if !ok {
-		return nil
-	}
-	return r.(RuleIndex)
-}
-
-// PassesTypeCheck determines whether the given body passes type checking
-func (c *Compiler) PassesTypeCheck(body Body) bool {
-	checker := newTypeChecker().WithSchemaSet(c.schemaSet).WithInputType(c.inputType)
-	env := c.TypeEnv
-	_, errs := checker.CheckBody(env, body)
-	return len(errs) == 0
-}
-
-// PassesTypeCheckRules determines whether the given rules passes type checking
-func (c *Compiler) PassesTypeCheckRules(rules []*Rule) Errors {
-	elems := []util.T{}
-
-	for _, rule := range rules {
-		elems = append(elems, rule)
-	}
-
-	// Load the global input schema if one was provided.
-	if c.schemaSet != nil {
-		if schema := c.schemaSet.Get(SchemaRootRef); schema != nil {
-
-			var allowNet []string
-			if c.capabilities != nil {
-				allowNet = c.capabilities.AllowNet
-			}
-
-			tpe, err := loadSchema(schema, allowNet)
-			if err != nil {
-				return Errors{NewError(TypeErr, nil, err.Error())}
-			}
-			c.inputType = tpe
-		}
-	}
-
-	var as *AnnotationSet
-	if c.useTypeCheckAnnotations {
-		as = c.annotationSet
-	}
-
-	checker := newTypeChecker().WithSchemaSet(c.schemaSet).WithInputType(c.inputType)
-
-	if c.TypeEnv == nil {
-		if c.capabilities == nil {
-			c.capabilities = CapabilitiesForThisVersion()
-		}
-
-		c.builtins = make(map[string]*Builtin, len(c.capabilities.Builtins)+len(c.customBuiltins))
-
-		for _, bi := range c.capabilities.Builtins {
-			c.builtins[bi.Name] = bi
-		}
-
-		for name, bi := range c.customBuiltins {
-			c.builtins[name] = bi
-		}
-
-		c.TypeEnv = checker.Env(c.builtins)
-	}
-
-	_, errs := checker.CheckTypes(c.TypeEnv, elems, as)
-	return errs
+	return v1.NewCompiler().WithDefaultRegoVersion(DefaultRegoVersion)
 }
 
 // ModuleLoader defines the interface that callers can implement to enable lazy
 // loading of modules during compilation.
-type ModuleLoader func(resolved map[string]*Module) (parsed map[string]*Module, err error)
-
-// WithModuleLoader sets f as the ModuleLoader on the compiler.
-//
-// The compiler will invoke the ModuleLoader after resolving all references in
-// the current set of input modules. The ModuleLoader can return a new
-// collection of parsed modules that are to be included in the compilation
-// process. This process will repeat until the ModuleLoader returns an empty
-// collection or an error. If an error is returned, compilation will stop
-// immediately.
-func (c *Compiler) WithModuleLoader(f ModuleLoader) *Compiler {
-	c.moduleLoader = f
-	return c
-}
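The ModuleLoader protocol described in the deleted doc comment still applies to the v1 alias: the compiler keeps invoking the loader until it returns an empty collection. A sketch of a loader that supplies one extra module on demand; the module names and sources are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	// Hypothetical lazily-available source.
	extra := map[string]string{"extra.rego": "package extra\n\nallow := true"}

	loader := func(resolved map[string]*ast.Module) (map[string]*ast.Module, error) {
		parsed := map[string]*ast.Module{}
		for id, src := range extra {
			if _, ok := resolved[id]; !ok { // only return modules not handed over yet,
				parsed[id] = ast.MustParseModule(src) // or the load loop never terminates
			}
		}
		return parsed, nil
	}

	c := ast.NewCompiler().WithModuleLoader(loader)
	c.Compile(map[string]*ast.Module{
		"main.rego": ast.MustParseModule(`package demo

import rego.v1

allow if data.extra.allow
`),
	})
	fmt.Println(c.Failed(), len(c.Modules)) // false 2
}
```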
-
-func (c *Compiler) counterAdd(name string, n uint64) {
-	if c.metrics == nil {
-		return
-	}
-	c.metrics.Counter(name).Add(n)
-}
-
-func (c *Compiler) buildRuleIndices() {
-
-	c.RuleTree.DepthFirst(func(node *TreeNode) bool {
-		if len(node.Values) == 0 {
-			return false
-		}
-		rules := extractRules(node.Values)
-		hasNonGroundRef := false
-		for _, r := range rules {
-			hasNonGroundRef = !r.Head.Ref().IsGround()
-		}
-		if hasNonGroundRef {
-			// Collect children to ensure that all rules within the extent of a rule with a general ref
-			// are found on the same index. E.g. the following rules should be indexed under data.a.b.c:
-			//
-			//	package a
-			//	b.c[x].e := 1 { x := input.x }
-			//	b.c.d := 2
-			//	b.c.d2.e[x] := 3 { x := input.x }
-			for _, child := range node.Children {
-				child.DepthFirst(func(c *TreeNode) bool {
-					rules = append(rules, extractRules(c.Values)...)
-					return false
-				})
-			}
-		}
-
-		index := newBaseDocEqIndex(func(ref Ref) bool {
-			return isVirtual(c.RuleTree, ref.GroundPrefix())
-		})
-		if index.Build(rules) {
-			c.ruleIndices.Put(rules[0].Ref().GroundPrefix(), index)
-		}
-		return hasNonGroundRef // currently, we don't allow those branches to go deeper
-	})
-
-}
-
-func (c *Compiler) buildComprehensionIndices() {
-	for _, name := range c.sorted {
-		WalkRules(c.Modules[name], func(r *Rule) bool {
-			candidates := r.Head.Args.Vars()
-			candidates.Update(ReservedVars)
-			n := buildComprehensionIndices(c.debug, c.GetArity, candidates, c.RewrittenVars, r.Body, c.comprehensionIndices)
-			c.counterAdd(compileStageComprehensionIndexBuild, n)
-			return false
-		})
-	}
-}
+type ModuleLoader = v1.ModuleLoader
 
-// buildRequiredCapabilities updates the required capabilities on the compiler
-// to include any keyword and feature dependencies present in the modules. The
-// built-in function dependencies will have already been added by the type
-// checker.
-func (c *Compiler) buildRequiredCapabilities() {
-
-	features := map[string]struct{}{}
-
-	// extract required keywords from modules
-	keywords := map[string]struct{}{}
-	futureKeywordsPrefix := Ref{FutureRootDocument, StringTerm("keywords")}
-	for _, name := range c.sorted {
-		for _, imp := range c.imports[name] {
-			path := imp.Path.Value.(Ref)
-			switch {
-			case path.Equal(RegoV1CompatibleRef):
-				features[FeatureRegoV1Import] = struct{}{}
-			case path.HasPrefix(futureKeywordsPrefix):
-				if len(path) == 2 {
-					for kw := range futureKeywords {
-						keywords[kw] = struct{}{}
-					}
-				} else {
-					keywords[string(path[2].Value.(String))] = struct{}{}
-				}
-			}
-		}
-	}
-
-	c.Required.FutureKeywords = stringMapToSortedSlice(keywords)
-
-	// extract required features from modules
-
-	for _, name := range c.sorted {
-		for _, rule := range c.Modules[name].Rules {
-			refLen := len(rule.Head.Reference)
-			if refLen >= 3 {
-				if refLen > len(rule.Head.Reference.ConstantPrefix()) {
-					features[FeatureRefHeads] = struct{}{}
-				} else {
-					features[FeatureRefHeadStringPrefixes] = struct{}{}
-				}
-			}
-		}
-	}
-
-	c.Required.Features = stringMapToSortedSlice(features)
-
-	for i, bi := range c.Required.Builtins {
-		c.Required.Builtins[i] = bi.Minimal()
-	}
-}
-
-func stringMapToSortedSlice(xs map[string]struct{}) []string {
-	if len(xs) == 0 {
-		return nil
-	}
-	s := make([]string, 0, len(xs))
-	for k := range xs {
-		s = append(s, k)
-	}
-	sort.Strings(s)
-	return s
-}
-
-// checkRecursion ensures that there are no recursive definitions, i.e., there are
-// no cycles in the Graph.
-func (c *Compiler) checkRecursion() {
-	eq := func(a, b util.T) bool {
-		return a.(*Rule) == b.(*Rule)
-	}
-
-	c.RuleTree.DepthFirst(func(node *TreeNode) bool {
-		for _, rule := range node.Values {
-			for node := rule.(*Rule); node != nil; node = node.Else {
-				c.checkSelfPath(node.Loc(), eq, node, node)
-			}
-		}
-		return false
-	})
-}
-
-func (c *Compiler) checkSelfPath(loc *Location, eq func(a, b util.T) bool, a, b util.T) {
-	tr := NewGraphTraversal(c.Graph)
-	if p := util.DFSPath(tr, eq, a, b); len(p) > 0 {
-		n := make([]string, 0, len(p))
-		for _, x := range p {
-			n = append(n, astNodeToString(x))
-		}
-		c.err(NewError(RecursionErr, loc, "rule %v is recursive: %v", astNodeToString(a), strings.Join(n, " -> ")))
-	}
-}
-
-func astNodeToString(x interface{}) string {
-	return x.(*Rule).Ref().String()
-}
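The recursion check deleted above reports a RecursionErr naming the cycle found via DFS over the dependency graph. A minimal sketch (not part of the patch) of a module that trips it:

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	_, err := ast.CompileModules(map[string]string{
		"rec.rego": `package demo

import rego.v1

p if q
q if p
`,
	})
	// Reports something like: rule data.demo.p is recursive: data.demo.p -> data.demo.q -> data.demo.p
	fmt.Println(err)
}
```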
-
-// checkRuleConflicts ensures that rules definitions are not in conflict.
-func (c *Compiler) checkRuleConflicts() {
-	rw := rewriteVarsInRef(c.RewrittenVars)
-
-	c.RuleTree.DepthFirst(func(node *TreeNode) bool {
-		if len(node.Values) == 0 {
-			return false // go deeper
-		}
-
-		kinds := make(map[RuleKind]struct{}, len(node.Values))
-		defaultRules := 0
-		completeRules := 0
-		partialRules := 0
-		arities := make(map[int]struct{}, len(node.Values))
-		name := ""
-		var conflicts []Ref
-
-		for _, rule := range node.Values {
-			r := rule.(*Rule)
-			ref := r.Ref()
-			name = rw(ref.Copy()).String() // varRewriter operates in-place
-			kinds[r.Head.RuleKind()] = struct{}{}
-			arities[len(r.Head.Args)] = struct{}{}
-			if r.Default {
-				defaultRules++
-			}
-
-			// Single-value rules may not have any other rules in their extent.
-			// Rules with vars in their ref are allowed to have rules inside their extent.
-			// Only the ground portion (terms before the first var term) of a rule's ref is considered when determining
-			// whether it's inside the extent of another (c.RuleTree is organized this way already).
-			// These pairs are invalid:
-			//
-			//	data.p.q.r { true }   # data.p.q is { "r": true }
-			//	data.p.q.r.s { true }
-			//
-			//	data.p.q.r { true }
-			//	data.p.q.r[s].t { s = input.key }
-			//
-			// But this is allowed:
-			//
-			//	data.p.q.r { true }
-			//	data.p.q[r].s.t { r = input.key }
-			//
-			//	data.p[r] := x { r = input.key; x = input.bar }
-			//	data.p.q[r] := x { r = input.key; x = input.bar }
-			//
-			//	data.p.q[r] { r := input.r }
-			//	data.p.q.r.s { true }
-			//
-			//	data.p.q[r] = 1 { r := "r" }
-			//	data.p.q.s = 2
-			//
-			//	data.p[q][r] { q := input.q; r := input.r }
-			//	data.p.q.r { true }
-			//
-			//	data.p.q[r] { r := input.r }
-			//	data.p[q].r { q := input.q }
-			//
-			//	data.p.q[r][s] { r := input.r; s := input.s }
-			//	data.p[q].r.s { q := input.q }
-
-			if r.Ref().IsGround() && len(node.Children) > 0 {
-				conflicts = node.flattenChildren()
-			}
-
-			if r.Head.RuleKind() == SingleValue && r.Head.Ref().IsGround() {
-				completeRules++
-			} else {
-				partialRules++
-			}
-		}
-
-		switch {
-		case conflicts != nil:
-			c.err(NewError(TypeErr, node.Values[0].(*Rule).Loc(), "rule %v conflicts with %v", name, conflicts))
-
-		case len(kinds) > 1 || len(arities) > 1 || (completeRules >= 1 && partialRules >= 1):
-			c.err(NewError(TypeErr, node.Values[0].(*Rule).Loc(), "conflicting rules %v found", name))
-
-		case defaultRules > 1:
-			c.err(NewError(TypeErr, node.Values[0].(*Rule).Loc(), "multiple default rules %s found", name))
-		}
-
-		return false
-	})
+// SafetyCheckVisitorParams defines the AST visitor parameters to use for collecting
+// variables during the safety check. This has to be exported because it's relied on
+// by the copy propagation implementation in topdown.
+var SafetyCheckVisitorParams = v1.SafetyCheckVisitorParams
 
-	if c.pathExists != nil {
-		for _, err := range CheckPathConflicts(c, c.pathExists) {
-			c.err(err)
-		}
-	}
+// ComprehensionIndex specifies how the comprehension term can be indexed. The keys
+// tell the evaluator what variables to use for indexing. In the future, the index
+// could be expanded with more information that would allow the evaluator to index
+// a larger fragment of comprehensions (e.g., by closing over variables in the outer
+// query.)
+type ComprehensionIndex = v1.ComprehensionIndex
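One of the invalid pairs listed in the deleted comment, reproduced as a sketch (not part of the patch): a ground complete rule with another rule inside its extent produces a compile-time conflict error:

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	// data.p.q.r is a complete rule, so data.p.q.r.s lies inside its extent.
	_, err := ast.CompileModules(map[string]string{
		"conflict.rego": `package p

import rego.v1

q.r if { true }
q.r.s if { true }
`,
	})
	fmt.Println(err) // reports that rule data.p.q.r conflicts with data.p.q.r.s
}
```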
-
-	// NOTE(sr): depthfirst might better use sorted for stable errs?
-	c.ModuleTree.DepthFirst(func(node *ModuleTreeNode) bool {
-		for _, mod := range node.Modules {
-			for _, rule := range mod.Rules {
-				ref := rule.Head.Ref().GroundPrefix()
-				// Rules with a dynamic portion in their ref are exempted, as a conflict within the dynamic portion
-				// can only be detected at eval-time.
-				if len(ref) < len(rule.Head.Ref()) {
-					continue
-				}
+// ModuleTreeNode represents a node in the module tree. The module
+// tree is keyed by the package path.
+type ModuleTreeNode = v1.ModuleTreeNode
 
-				childNode, tail := node.find(ref)
-				if childNode != nil && len(tail) == 0 {
-					for _, childMod := range childNode.Modules {
-						// Avoid recursively checking a module for equality unless we know it's a possible self-match.
-						if childMod.Equal(mod) {
-							continue // don't self-conflict
-						}
-						msg := fmt.Sprintf("%v conflicts with rule %v defined at %v", childMod.Package, rule.Head.Ref(), rule.Loc())
-						c.err(NewError(TypeErr, mod.Package.Loc(), msg))
-					}
-				}
-			}
-		}
-		return false
-	})
-}
+// TreeNode represents a node in the rule tree. The rule tree is keyed by
+// rule path.
+type TreeNode = v1.TreeNode
 
-func (c *Compiler) checkUndefinedFuncs() {
-	for _, name := range c.sorted {
-		m := c.Modules[name]
-		for _, err := range checkUndefinedFuncs(c.TypeEnv, m, c.GetArity, c.RewrittenVars) {
-			c.err(err)
-		}
-	}
+// NewRuleTree returns a new TreeNode that represents the root
+// of the rule tree populated with the given rules.
+func NewRuleTree(mtree *ModuleTreeNode) *TreeNode {
+	return v1.NewRuleTree(mtree)
 }
 
-func checkUndefinedFuncs(env *TypeEnv, x interface{}, arity func(Ref) int, rwVars map[Var]Var) Errors {
-
-	var errs Errors
-
-	WalkExprs(x, func(expr *Expr) bool {
-		if !expr.IsCall() {
-			return false
-		}
-		ref := expr.Operator()
-		if arity := arity(ref); arity >= 0 {
-			operands := len(expr.Operands())
-			if expr.Generated { // an output var was added
-				if !expr.IsEquality() && operands != arity+1 {
-					ref = rewriteVarsInRef(rwVars)(ref)
-					errs = append(errs, arityMismatchError(env, ref, expr, arity, operands-1))
-					return true
-				}
-			} else { // either output var or not
-				if operands != arity && operands != arity+1 {
-					ref = rewriteVarsInRef(rwVars)(ref)
-					errs = append(errs, arityMismatchError(env, ref, expr, arity, operands))
-					return true
-				}
-			}
-			return false
-		}
-		ref = rewriteVarsInRef(rwVars)(ref)
-		errs = append(errs, NewError(TypeErr, expr.Loc(), "undefined function %v", ref))
-		return true
-	})
-
-	return errs
-}
+// Graph represents the graph of dependencies between rules.
+type Graph = v1.Graph
 
-func arityMismatchError(env *TypeEnv, f Ref, expr *Expr, exp, act int) *Error {
-	if want, ok := env.Get(f).(*types.Function); ok { // generate richer error for built-in functions
-		have := make([]types.Type, len(expr.Operands()))
-		for i, op := range expr.Operands() {
-			have[i] = env.Get(op)
-		}
-		return newArgError(expr.Loc(), f, "arity mismatch", have, want.NamedFuncArgs())
-	}
-	if act != 1 {
-		return NewError(TypeErr, expr.Loc(), "function %v has arity %d, got %d arguments", f, exp, act)
-	}
-	return NewError(TypeErr, expr.Loc(), "function %v has arity %d, got %d argument", f, exp, act)
+// NewGraph returns a new Graph based on modules. The list function must return
+// the rules referred to directly by the ref.
+func NewGraph(modules map[string]*Module, list func(Ref) []*Rule) *Graph {
+	return v1.NewGraph(modules, list)
 }
 
-// checkSafetyRuleBodies ensures that variables appearing in negated expressions or non-target
-// positions of built-in expressions will be bound when evaluating the rule from left
-// to right, re-ordering as necessary.
-func (c *Compiler) checkSafetyRuleBodies() {
-	for _, name := range c.sorted {
-		m := c.Modules[name]
-		WalkRules(m, func(r *Rule) bool {
-			safe := ReservedVars.Copy()
-			safe.Update(r.Head.Args.Vars())
-			r.Body = c.checkBodySafety(safe, r.Body)
-			return false
-		})
-	}
-}
+// GraphTraversal is a Traversal that understands the dependency graph
+type GraphTraversal = v1.GraphTraversal
 
-func (c *Compiler) checkBodySafety(safe VarSet, b Body) Body {
-	reordered, unsafe := reorderBodyForSafety(c.builtins, c.GetArity, safe, b)
-	if errs := safetyErrorSlice(unsafe, c.RewrittenVars); len(errs) > 0 {
-		for _, err := range errs {
-			c.err(err)
-		}
-		return b
-	}
-	return reordered
+// NewGraphTraversal returns a Traversal for the dependency graph
+func NewGraphTraversal(graph *Graph) *GraphTraversal {
+	return v1.NewGraphTraversal(graph)
 }
 
-// SafetyCheckVisitorParams defines the AST visitor parameters to use for collecting
-// variables during the safety check. This has to be exported because it's relied on
-// by the copy propagation implementation in topdown.
-var SafetyCheckVisitorParams = VarVisitorParams{
-	SkipRefCallHead: true,
-	SkipClosures:    true,
+// OutputVarsFromBody returns all variables which are the "output" for
+// the given body. For safety checks this means that they would be
+// made safe by the body.
+func OutputVarsFromBody(c *Compiler, body Body, safe VarSet) VarSet {
+	return v1.OutputVarsFromBody(c, body, safe)
 }
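The deleted safety stage reorders body expressions so every variable is bound before use and rejects bodies where that is impossible. A sketch (not part of the patch) of a body that cannot be made safe, because negation never binds variables:

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	_, err := ast.CompileModules(map[string]string{
		"unsafe.rego": `package demo

import rego.v1

q contains 1

p if not q[y]
`,
	})
	fmt.Println(err) // reports that var y is unsafe: nothing in the body can bind it
}
```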
-
-// checkSafetyRuleHeads ensures that variables appearing in the head of a
-// rule also appear in the body.
-func (c *Compiler) checkSafetyRuleHeads() {
-
-	for _, name := range c.sorted {
-		m := c.Modules[name]
-		WalkRules(m, func(r *Rule) bool {
-			safe := r.Body.Vars(SafetyCheckVisitorParams)
-			safe.Update(r.Head.Args.Vars())
-			unsafe := r.Head.Vars().Diff(safe)
-			for v := range unsafe {
-				if w, ok := c.RewrittenVars[v]; ok {
-					v = w
-				}
-				if !v.IsGenerated() {
-					c.err(NewError(UnsafeVarErr, r.Loc(), "var %v is unsafe", v))
-				}
-			}
-			return false
-		})
-	}
-}
-
-func compileSchema(goSchema interface{}, allowNet []string) (*gojsonschema.Schema, error) {
-	gojsonschema.SetAllowNet(allowNet)
-
-	var refLoader gojsonschema.JSONLoader
-	sl := gojsonschema.NewSchemaLoader()
-
-	if goSchema != nil {
-		refLoader = gojsonschema.NewGoLoader(goSchema)
-	} else {
-		return nil, fmt.Errorf("no schema as input to compile")
-	}
-	schemasCompiled, err := sl.Compile(refLoader)
-	if err != nil {
-		return nil, fmt.Errorf("unable to compile the schema: %w", err)
-	}
-	return schemasCompiled, nil
-}
-
-func mergeSchemas(schemas ...*gojsonschema.SubSchema) (*gojsonschema.SubSchema, error) {
-	if len(schemas) == 0 {
-		return nil, nil
-	}
-	var result = schemas[0]
-
-	for i := range schemas {
-		if len(schemas[i].PropertiesChildren) > 0 {
-			if !schemas[i].Types.Contains("object") {
-				if err := schemas[i].Types.Add("object"); err != nil {
-					return nil, fmt.Errorf("unable to set the type in schemas")
-				}
-			}
-		} else if len(schemas[i].ItemsChildren) > 0 {
-			if !schemas[i].Types.Contains("array") {
-				if err := schemas[i].Types.Add("array"); err != nil {
-					return nil, fmt.Errorf("unable to set the type in schemas")
-				}
-			}
-		}
-	}
-
-	for i := 1; i < len(schemas); i++ {
-		if result.Types.String() != schemas[i].Types.String() {
-			return nil, fmt.Errorf("unable to merge these schemas: type mismatch: %v and %v", result.Types.String(), schemas[i].Types.String())
-		} else if result.Types.Contains("object") && len(result.PropertiesChildren) > 0 && schemas[i].Types.Contains("object") && len(schemas[i].PropertiesChildren) > 0 {
-			result.PropertiesChildren = append(result.PropertiesChildren, schemas[i].PropertiesChildren...)
-		} else if result.Types.Contains("array") && len(result.ItemsChildren) > 0 && schemas[i].Types.Contains("array") && len(schemas[i].ItemsChildren) > 0 {
-			for j := 0; j < len(schemas[i].ItemsChildren); j++ {
-				if len(result.ItemsChildren)-1 < j && !(len(schemas[i].ItemsChildren)-1 < j) {
-					result.ItemsChildren = append(result.ItemsChildren, schemas[i].ItemsChildren[j])
-				}
-				if result.ItemsChildren[j].Types.String() != schemas[i].ItemsChildren[j].Types.String() {
-					return nil, fmt.Errorf("unable to merge these schemas")
-				}
-			}
-		}
-	}
-	return result, nil
-}
-
-type schemaParser struct {
-	definitionCache map[string]*cachedDef
-}
-
-type cachedDef struct {
-	properties []*types.StaticProperty
-}
-
-func newSchemaParser() *schemaParser {
-	return &schemaParser{
-		definitionCache: map[string]*cachedDef{},
-	}
-}
-
-func (parser *schemaParser) parseSchema(schema interface{}) (types.Type, error) {
-	return parser.parseSchemaWithPropertyKey(schema, "")
-}
-
-func (parser *schemaParser) parseSchemaWithPropertyKey(schema interface{}, propertyKey string) (types.Type, error) {
-	subSchema, ok := schema.(*gojsonschema.SubSchema)
-	if !ok {
-		return nil, fmt.Errorf("unexpected schema type %v", subSchema)
-	}
-
-	// Handle referenced schemas, returns directly when a $ref is found
-	if subSchema.RefSchema != nil {
-		if existing, ok := parser.definitionCache[subSchema.Ref.String()]; ok {
-			return types.NewObject(existing.properties, nil), nil
-		}
-		return parser.parseSchemaWithPropertyKey(subSchema.RefSchema, subSchema.Ref.String())
-	}
-
-	// Handle anyOf
-	if subSchema.AnyOf != nil {
-		var orType types.Type
-
-		// If there is a core schema, find its type first
-		if subSchema.Types.IsTyped() {
-			copySchema := *subSchema
-			copySchemaRef := &copySchema
-			copySchemaRef.AnyOf = nil
-			coreType, err := parser.parseSchema(copySchemaRef)
-			if err != nil {
-				return nil, fmt.Errorf("unexpected schema type %v: %w", subSchema, err)
-			}
-
-			// Only add Object type with static props to orType
-			if objType, ok := coreType.(*types.Object); ok {
-				if objType.StaticProperties() != nil && objType.DynamicProperties() == nil {
-					orType = types.Or(orType, coreType)
-				}
-			}
-		}
-
-		// Iterate through every property of AnyOf and add it to orType
-		for _, pSchema := range subSchema.AnyOf {
-			newtype, err := parser.parseSchema(pSchema)
-			if err != nil {
-				return nil, fmt.Errorf("unexpected schema type %v: %w", pSchema, err)
-			}
-			orType = types.Or(newtype, orType)
-		}
-
-		return orType, nil
-	}
-
-	if subSchema.AllOf != nil {
-		subSchemaArray := subSchema.AllOf
-		allOfResult, err := mergeSchemas(subSchemaArray...)
-		if err != nil {
-			return nil, err
-		}
-
-		if subSchema.Types.IsTyped() {
-			if (subSchema.Types.Contains("object") && allOfResult.Types.Contains("object")) || (subSchema.Types.Contains("array") && allOfResult.Types.Contains("array")) {
-				objectOrArrayResult, err := mergeSchemas(allOfResult, subSchema)
-				if err != nil {
-					return nil, err
-				}
-				return parser.parseSchema(objectOrArrayResult)
-			} else if subSchema.Types.String() != allOfResult.Types.String() {
-				return nil, fmt.Errorf("unable to merge these schemas")
-			}
-		}
-		return parser.parseSchema(allOfResult)
-	}
-
-	if subSchema.Types.IsTyped() {
-		if subSchema.Types.Contains("boolean") {
-			return types.B, nil
-
-		} else if subSchema.Types.Contains("string") {
-			return types.S, nil
-
-		} else if subSchema.Types.Contains("integer") || subSchema.Types.Contains("number") {
-			return types.N, nil
-
-		} else if subSchema.Types.Contains("object") {
-			if len(subSchema.PropertiesChildren) > 0 {
-				def := &cachedDef{
-					properties: make([]*types.StaticProperty, 0, len(subSchema.PropertiesChildren)),
-				}
-				for _, pSchema := range subSchema.PropertiesChildren {
-					def.properties = append(def.properties, types.NewStaticProperty(pSchema.Property, nil))
-				}
-				if propertyKey != "" {
-					parser.definitionCache[propertyKey] = def
-				}
-				for _, pSchema := range subSchema.PropertiesChildren {
-					newtype, err := parser.parseSchema(pSchema)
-					if err != nil {
-						return nil, fmt.Errorf("unexpected schema type %v: %w", pSchema, err)
-					}
-					for i, prop := range def.properties {
-						if prop.Key == pSchema.Property {
-							def.properties[i].Value = newtype
-							break
-						}
-					}
-				}
-				return types.NewObject(def.properties, nil), nil
-			}
-			return types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), nil
-
-		} else if subSchema.Types.Contains("array") {
-			if len(subSchema.ItemsChildren) > 0 {
-				if subSchema.ItemsChildrenIsSingleSchema {
-					iSchema := subSchema.ItemsChildren[0]
-					newtype, err := parser.parseSchema(iSchema)
-					if err != nil {
-						return nil, fmt.Errorf("unexpected schema type %v", iSchema)
-					}
-					return types.NewArray(nil, newtype), nil
-				}
-				newTypes := make([]types.Type, 0, len(subSchema.ItemsChildren))
-				for i := 0; i != len(subSchema.ItemsChildren); i++ {
-					iSchema := subSchema.ItemsChildren[i]
-					newtype, err := parser.parseSchema(iSchema)
-					if err != nil {
-						return nil, fmt.Errorf("unexpected schema type %v", iSchema)
-					}
-					newTypes = append(newTypes, newtype)
-				}
-				return types.NewArray(newTypes, nil), nil
-			}
-			return types.NewArray(nil, types.A), nil
-		}
-	}
-
-	// Assume types if not specified in schema
-	if len(subSchema.PropertiesChildren) > 0 {
-		if err := subSchema.Types.Add("object"); err == nil {
-			return parser.parseSchema(subSchema)
-		}
-	} else if len(subSchema.ItemsChildren) > 0 {
-		if err := subSchema.Types.Add("array"); err == nil {
-			return parser.parseSchema(subSchema)
-		}
-	}
-
-	return types.A, nil
-}
-
-func (c *Compiler) setAnnotationSet() {
-	// Sorting modules by name for stable error reporting
-	sorted := make([]*Module, 0, len(c.Modules))
-	for _, mName := range c.sorted {
-		sorted = append(sorted, c.Modules[mName])
-	}
-
-	as, errs := BuildAnnotationSet(sorted)
-	for _, err := range errs {
-		c.err(err)
-	}
-	c.annotationSet = as
-}
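The deleted schema machinery above turns JSON Schemas into static types that the type checker enforces against `input`. A sketch (not part of the patch) of wiring a global input schema through a SchemaSet; the schema and policy are illustrative, and the expectation that the typo `input.usr` is rejected assumes the checker's usual undefined-ref error for statically typed objects:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	var schema interface{}
	_ = json.Unmarshal([]byte(`{
		"type": "object",
		"properties": {"user": {"type": "string"}}
	}`), &schema)

	ss := ast.NewSchemaSet()
	ss.Put(ast.SchemaRootRef, schema) // schema for the global input document

	c := ast.NewCompiler().WithSchemas(ss)
	c.Compile(map[string]*ast.Module{
		"policy.rego": ast.MustParseModule(`package demo

import rego.v1

deny if input.usr == "bob"
`),
	})
	fmt.Println(c.Failed(), c.Errors) // true: "usr" is not a property of the input schema
}
```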
-
-// checkTypes runs the type checker on all rules. The type checker builds a
-// TypeEnv that is stored on the compiler.
-func (c *Compiler) checkTypes() {
-	// Recursion is caught in earlier step, so this cannot fail.
-	sorted, _ := c.Graph.Sort()
-	checker := newTypeChecker().
-		WithAllowNet(c.capabilities.AllowNet).
-		WithSchemaSet(c.schemaSet).
-		WithInputType(c.inputType).
-		WithBuiltins(c.builtins).
-		WithRequiredCapabilities(c.Required).
-		WithVarRewriter(rewriteVarsInRef(c.RewrittenVars)).
-		WithAllowUndefinedFunctionCalls(c.allowUndefinedFuncCalls)
-	var as *AnnotationSet
-	if c.useTypeCheckAnnotations {
-		as = c.annotationSet
-	}
-	env, errs := checker.CheckTypes(c.TypeEnv, sorted, as)
-	for _, err := range errs {
-		c.err(err)
-	}
-	c.TypeEnv = env
-}
-
-func (c *Compiler) checkUnsafeBuiltins() {
-	for _, name := range c.sorted {
-		errs := checkUnsafeBuiltins(c.unsafeBuiltinsMap, c.Modules[name])
-		for _, err := range errs {
-			c.err(err)
-		}
-	}
-}
-
-func (c *Compiler) checkDeprecatedBuiltins() {
-	for _, name := range c.sorted {
-		mod := c.Modules[name]
-		if c.strict || mod.regoV1Compatible() {
-			errs := checkDeprecatedBuiltins(c.deprecatedBuiltinsMap, mod)
-			for _, err := range errs {
-				c.err(err)
-			}
-		}
-	}
-}
-
-func (c *Compiler) runStage(metricName string, f func()) {
-	if c.metrics != nil {
-		c.metrics.Timer(metricName).Start()
-		defer c.metrics.Timer(metricName).Stop()
-	}
-	f()
-}
-
-func (c *Compiler) runStageAfter(metricName string, s CompilerStage) *Error {
-	if c.metrics != nil {
-		c.metrics.Timer(metricName).Start()
-		defer c.metrics.Timer(metricName).Stop()
-	}
-	return s(c)
-}
-
-func (c *Compiler) compile() {
-
-	defer func() {
-		if r := recover(); r != nil && r != errLimitReached {
-			panic(r)
-		}
-	}()
-
-	for _, s := range c.stages {
-		if c.evalMode == EvalModeIR {
-			switch s.name {
-			case "BuildRuleIndices", "BuildComprehensionIndices":
-				continue // skip these stages
-			}
-		}
-
-		if c.allowUndefinedFuncCalls && (s.name == "CheckUndefinedFuncs" || s.name == "CheckSafetyRuleBodies") {
-			continue
-		}
-
-		c.runStage(s.metricName, s.f)
-		if c.Failed() {
-			return
-		}
-		for _, a := range c.after[s.name] {
-			if err := c.runStageAfter(a.MetricName, a.Stage); err != nil {
-				c.err(err)
-				return
-			}
-		}
-	}
-}
-
-func (c *Compiler) init() {
-
-	if c.initialized {
-		return
-	}
-
-	if c.capabilities == nil {
-		c.capabilities = CapabilitiesForThisVersion()
-	}
-
-	c.builtins = make(map[string]*Builtin, len(c.capabilities.Builtins)+len(c.customBuiltins))
-
-	for _, bi := range c.capabilities.Builtins {
-		c.builtins[bi.Name] = bi
-		if bi.IsDeprecated() {
-			c.deprecatedBuiltinsMap[bi.Name] = struct{}{}
-		}
-	}
-
-	for name, bi := range c.customBuiltins {
-		c.builtins[name] = bi
-	}
-
-	// Load the global input schema if one was provided.
-	if c.schemaSet != nil {
-		if schema := c.schemaSet.Get(SchemaRootRef); schema != nil {
-			tpe, err := loadSchema(schema, c.capabilities.AllowNet)
-			if err != nil {
-				c.err(NewError(TypeErr, nil, err.Error()))
-			} else {
-				c.inputType = tpe
-			}
-		}
-	}
-
-	c.TypeEnv = newTypeChecker().
-		WithSchemaSet(c.schemaSet).
-		WithInputType(c.inputType).
-		Env(c.builtins)
-
-	c.initialized = true
-}
-
-func (c *Compiler) err(err *Error) {
-	if c.maxErrs > 0 && len(c.Errors) >= c.maxErrs {
-		c.Errors = append(c.Errors, errLimitReached)
-		panic(errLimitReached)
-	}
-	c.Errors = append(c.Errors, err)
-}
-
-func (c *Compiler) getExports() *util.HashMap {
-
-	rules := util.NewHashMap(func(a, b util.T) bool {
-		return a.(Ref).Equal(b.(Ref))
-	}, func(v util.T) int {
-		return v.(Ref).Hash()
-	})
-
-	for _, name := range c.sorted {
-		mod := c.Modules[name]
-
-		for _, rule := range mod.Rules {
-			hashMapAdd(rules, mod.Package.Path, rule.Head.Ref().GroundPrefix())
-		}
-	}
-
-	return rules
-}
-
-func hashMapAdd(rules *util.HashMap, pkg, rule Ref) {
-	prev, ok := rules.Get(pkg)
-	if !ok {
-		rules.Put(pkg, []Ref{rule})
-		return
-	}
-	for _, p := range prev.([]Ref) {
-		if p.Equal(rule) {
-			return
-		}
-	}
-	rules.Put(pkg, append(prev.([]Ref), rule))
-}
-
-func (c *Compiler) GetAnnotationSet() *AnnotationSet {
-	return c.annotationSet
-}
-
-func (c *Compiler) checkDuplicateImports() {
-	modules := make([]*Module, 0, len(c.Modules))
-
-	for _, name := range c.sorted {
-		mod := c.Modules[name]
-		if c.strict || mod.regoV1Compatible() {
-			modules = append(modules, mod)
-		}
-	}
-
-	errs := checkDuplicateImports(modules)
-	for _, err := range errs {
-		c.err(err)
-	}
-}
-
-func (c *Compiler) checkKeywordOverrides() {
-	for _, name := range c.sorted {
-		mod := c.Modules[name]
-		if c.strict || mod.regoV1Compatible() {
-			errs := checkRootDocumentOverrides(mod)
-			for _, err := range errs {
-				c.err(err)
-			}
-		}
-	}
-}
-
-// resolveAllRefs resolves references in expressions to their fully qualified values.
-//
-// For instance, given the following module:
-//
-//	package a.b
-//	import data.foo.bar
-//	p[x] { bar[_] = x }
-//
-// The reference "bar[_]" would be resolved to "data.foo.bar[_]".
-//
-// Ref rules are resolved, too:
-//
-//	package a.b
-//	q { c.d.e == 1 }
-//	c.d[e] := 1 if e := "e"
-//
-// The reference "c.d.e" would be resolved to "data.a.b.c.d.e".
-func (c *Compiler) resolveAllRefs() {
-
-	rules := c.getExports()
-
-	for _, name := range c.sorted {
-		mod := c.Modules[name]
-
-		var ruleExports []Ref
-		if x, ok := rules.Get(mod.Package.Path); ok {
-			ruleExports = x.([]Ref)
-		}
-
-		globals := getGlobals(mod.Package, ruleExports, mod.Imports)
-
-		WalkRules(mod, func(rule *Rule) bool {
-			err := resolveRefsInRule(globals, rule)
-			if err != nil {
-				c.err(NewError(CompileErr, rule.Location, err.Error()))
-			}
-			return false
-		})
-
-		if c.strict { // check for unused imports
-			for _, imp := range mod.Imports {
-				path := imp.Path.Value.(Ref)
-				if FutureRootDocument.Equal(path[0]) || RegoRootDocument.Equal(path[0]) {
-					continue // ignore future and rego imports
-				}
-
-				for v, u := range globals {
-					if v.Equal(imp.Name()) && !u.used {
-						c.err(NewError(CompileErr, imp.Location, "%s unused", imp.String()))
-					}
-				}
-			}
-		}
-	}
-
-	if c.moduleLoader != nil {
-
-		parsed, err := c.moduleLoader(c.Modules)
-		if err != nil {
-			c.err(NewError(CompileErr, nil, err.Error()))
-			return
-		}
-
-		if len(parsed) == 0 {
-			return
-		}
-
-		for id, module := range parsed {
-			c.Modules[id] = module.Copy()
-			c.sorted = append(c.sorted, id)
-			if c.parsedModules != nil {
-				c.parsedModules[id] = module
-			}
-		}
-
-		sort.Strings(c.sorted)
-		c.resolveAllRefs()
-	}
-}
-
-func (c *Compiler) removeImports() {
-	c.imports = make(map[string][]*Import, len(c.Modules))
-	for name := range c.Modules {
-		c.imports[name] = c.Modules[name].Imports
-		c.Modules[name].Imports = nil
-	}
-}
-
-func (c *Compiler) initLocalVarGen() {
-	c.localvargen = newLocalVarGeneratorForModuleSet(c.sorted, c.Modules)
-}
-
-func (c *Compiler) rewriteComprehensionTerms() {
-	f := newEqualityFactory(c.localvargen)
-	for _, name := range c.sorted {
-		mod := c.Modules[name]
-		_, _ = rewriteComprehensionTerms(f, mod) // ignore error
-	}
-}
-
-func (c *Compiler) rewriteExprTerms() {
-	for _, name := range c.sorted {
-		mod := c.Modules[name]
-		WalkRules(mod, func(rule *Rule) bool {
-			rewriteExprTermsInHead(c.localvargen, rule)
-			rule.Body = rewriteExprTermsInBody(c.localvargen, rule.Body)
-			return false
-		})
-	}
-}
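Under strict compilation, resolveAllRefs (deleted above) reports imports whose bound name is never used. A sketch, not part of the patch:

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	c := ast.NewCompiler().WithStrict(true)
	c.Compile(map[string]*ast.Module{
		"strict.rego": ast.MustParseModule(`package demo

import data.foo.bar # never referenced

p := 1
`),
	})
	fmt.Println(c.Errors) // "import data.foo.bar unused"
}
```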
-
-func (c *Compiler) rewriteRuleHeadRefs() {
-	f := newEqualityFactory(c.localvargen)
-	for _, name := range c.sorted {
-		WalkRules(c.Modules[name], func(rule *Rule) bool {
-
-			ref := rule.Head.Ref()
-			// NOTE(sr): We're backfilling Refs here -- all parser code paths would have them, but
-			// it's possible to construct Module{} instances from Golang code, so we need
-			// to accommodate for that, too.
-			if len(rule.Head.Reference) == 0 {
-				rule.Head.Reference = ref
-			}
-
-			cannotSpeakStringPrefixRefs := true
-			cannotSpeakGeneralRefs := true
-			for _, f := range c.capabilities.Features {
-				switch f {
-				case FeatureRefHeadStringPrefixes:
-					cannotSpeakStringPrefixRefs = false
-				case FeatureRefHeads:
-					cannotSpeakGeneralRefs = false
-				}
-			}
-
-			if cannotSpeakStringPrefixRefs && cannotSpeakGeneralRefs && rule.Head.Name == "" {
-				c.err(NewError(CompileErr, rule.Loc(), "rule heads with refs are not supported: %v", rule.Head.Reference))
-				return true
-			}
-
-			for i := 1; i < len(ref); i++ {
-				if cannotSpeakGeneralRefs && (rule.Head.RuleKind() == MultiValue || i != len(ref)-1) { // last
-					if _, ok := ref[i].Value.(String); !ok {
-						c.err(NewError(TypeErr, rule.Loc(), "rule heads with general refs (containing variables) are not supported: %v", rule.Head.Reference))
-						continue
-					}
-				}
-
-				// Rewrite so that any non-scalar elements in the rule's ref are vars:
-				//	p.q.r[y.z] { ... }    => p.q.r[__local0__] { __local0__ = y.z }
-				//	p.q[a.b][c.d] { ... } => p.q[__local0__] { __local0__ = a.b; __local1__ = c.d }
-				// because that's what the RuleTree knows how to deal with.
-				if _, ok := ref[i].Value.(Var); !ok && !IsScalar(ref[i].Value) {
-					expr := f.Generate(ref[i])
-					if i == len(ref)-1 && rule.Head.Key.Equal(ref[i]) {
-						rule.Head.Key = expr.Operand(0)
-					}
-					rule.Head.Reference[i] = expr.Operand(0)
-					rule.Body.Append(expr)
-				}
-			}
-
-			return true
-		})
-	}
-}
-
-func (c *Compiler) checkVoidCalls() {
-	for _, name := range c.sorted {
-		mod := c.Modules[name]
-		for _, err := range checkVoidCalls(c.TypeEnv, mod) {
-			c.err(err)
-		}
-	}
-}
-
-func (c *Compiler) rewritePrintCalls() {
-	var modified bool
-	if !c.enablePrintStatements {
-		for _, name := range c.sorted {
-			if erasePrintCalls(c.Modules[name]) {
-				modified = true
-			}
-		}
-	} else {
-		for _, name := range c.sorted {
-			mod := c.Modules[name]
-			WalkRules(mod, func(r *Rule) bool {
-				safe := r.Head.Args.Vars()
-				safe.Update(ReservedVars)
-				vis := func(b Body) bool {
-					modrec, errs := rewritePrintCalls(c.localvargen, c.GetArity, safe, b)
-					if modrec {
-						modified = true
-					}
-					for _, err := range errs {
-						c.err(err)
-					}
-					return false
-				}
-				WalkBodies(r.Head, vis)
-				WalkBodies(r.Body, vis)
-				return false
-			})
-		}
-	}
-	if modified {
-		c.Required.addBuiltinSorted(Print)
-	}
-}
-
-// checkVoidCalls returns errors for any expressions that treat void function
-// calls as values. The only void functions in Rego are specific built-ins like
-// print().
-func checkVoidCalls(env *TypeEnv, x interface{}) Errors {
-	var errs Errors
-	WalkTerms(x, func(x *Term) bool {
-		if call, ok := x.Value.(Call); ok {
-			if tpe, ok := env.Get(call[0]).(*types.Function); ok && tpe.Result() == nil {
-				errs = append(errs, NewError(TypeErr, x.Loc(), "%v used as value", call))
-			}
-		}
-		return false
-	})
-	return errs
-}
-
-// rewritePrintCalls will rewrite the body so that print operands are captured
-// in local variables and their evaluation occurs within a comprehension.
-// Wrapping the terms inside of a comprehension ensures that undefined values do
-// not short-circuit evaluation.
-//
-// For example, given the following print statement:
-//
-//	print("the value of x is:", input.x)
-//
-// The expression would be rewritten to:
-//
-//	print({__local0__ | __local0__ = "the value of x is:"}, {__local1__ | __local1__ = input.x})
-func rewritePrintCalls(gen *localVarGenerator, getArity func(Ref) int, globals VarSet, body Body) (bool, Errors) {
-
-	var errs Errors
-	var modified bool
-
-	// Visit comprehension bodies recursively to ensure print statements inside
-	// those bodies only close over variables that are safe.
-	for i := range body {
-		if ContainsClosures(body[i]) {
-			safe := outputVarsForBody(body[:i], getArity, globals)
-			safe.Update(globals)
-			WalkClosures(body[i], func(x interface{}) bool {
-				var modrec bool
-				var errsrec Errors
-				switch x := x.(type) {
-				case *SetComprehension:
-					modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body)
-				case *ArrayComprehension:
-					modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body)
-				case *ObjectComprehension:
-					modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body)
-				case *Every:
-					safe.Update(x.KeyValueVars())
-					modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body)
-				}
-				if modrec {
-					modified = true
-				}
-				errs = append(errs, errsrec...)
-				return true
-			})
-			if len(errs) > 0 {
-				return false, errs
-			}
-		}
-	}
-
-	for i := range body {
-
-		if !isPrintCall(body[i]) {
-			continue
-		}
-
-		modified = true
-
-		var errs Errors
-		safe := outputVarsForBody(body[:i], getArity, globals)
-		safe.Update(globals)
-		args := body[i].Operands()
-
-		for j := range args {
-			vis := NewVarVisitor().WithParams(SafetyCheckVisitorParams)
-			vis.Walk(args[j])
-			unsafe := vis.Vars().Diff(safe)
-			for _, v := range unsafe.Sorted() {
-				errs = append(errs, NewError(CompileErr, args[j].Loc(), "var %v is undeclared", v))
-			}
-		}
-
-		if len(errs) > 0 {
-			return false, errs
-		}
-
-		arr := NewArray()
-
-		for j := range args {
-			x := NewTerm(gen.Generate()).SetLocation(args[j].Loc())
-			capture := Equality.Expr(x, args[j]).SetLocation(args[j].Loc())
-			arr = arr.Append(SetComprehensionTerm(x, NewBody(capture)).SetLocation(args[j].Loc()))
-		}
-
-		body.Set(NewExpr([]*Term{
-			NewTerm(InternalPrint.Ref()).SetLocation(body[i].Loc()),
-			NewTerm(arr).SetLocation(body[i].Loc()),
-		}).SetLocation(body[i].Loc()), i)
-	}
-
-	return modified, nil
-}
-
-func erasePrintCalls(node interface{}) bool {
-	var modified bool
-	NewGenericVisitor(func(x interface{}) bool {
-		var modrec bool
-		switch x := x.(type) {
-		case *Rule:
-			modrec, x.Body = erasePrintCallsInBody(x.Body)
-		case *ArrayComprehension:
-			modrec, x.Body = erasePrintCallsInBody(x.Body)
-		case *SetComprehension:
-			modrec, x.Body = erasePrintCallsInBody(x.Body)
-		case *ObjectComprehension:
-			modrec, x.Body = erasePrintCallsInBody(x.Body)
-		case *Every:
-			modrec, x.Body = erasePrintCallsInBody(x.Body)
-		}
-		if modrec {
-			modified = true
-		}
-		return false
-	}).Walk(node)
-	return modified
-}
-
-func erasePrintCallsInBody(x Body) (bool, Body) {
-
-	if !containsPrintCall(x) {
-		return false, x
-	}
-
-	var cpy Body
-
-	for i := range x {
-
-		// Recursively visit any comprehensions contained in this expression.
-		erasePrintCalls(x[i])
-
-		if !isPrintCall(x[i]) {
-			cpy.Append(x[i])
-		}
-	}
-
-	if len(cpy) == 0 {
-		term := BooleanTerm(true).SetLocation(x.Loc())
-		expr := NewExpr(term).SetLocation(x.Loc())
-		cpy.Append(expr)
-	}
-
-	return true, cpy
-}
-
-func containsPrintCall(x interface{}) bool {
-	var found bool
-	WalkExprs(x, func(expr *Expr) bool {
-		if !found {
-			if isPrintCall(expr) {
-				found = true
-			}
-		}
-		return found
-	})
-	return found
-}
-
-func isPrintCall(x *Expr) bool {
-	return x.IsCall() && x.Operator().Equal(Print.Ref())
-}
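The print rewrite deleted above is observable from the public API: with print statements enabled, compiled bodies carry the internal print call with comprehension-wrapped operands; without, print calls are erased. A sketch, not part of the patch:

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	c := ast.NewCompiler().WithEnablePrintStatements(true)
	c.Compile(map[string]*ast.Module{
		"print.rego": ast.MustParseModule(`package demo

import rego.v1

p if { print("the value of x is:", input.x) }
`),
	})
	if c.Failed() {
		panic(c.Errors)
	}
	// The operands are now captured in set comprehensions so an undefined
	// input.x cannot short-circuit evaluation, per the doc comment above.
	fmt.Println(c.Modules["print.rego"].Rules[0].Body)
}
```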
-
-// rewriteRefsInHead will rewrite rules so that the head does not contain any
-// terms that require evaluation (e.g., refs or comprehensions). If the key or
-// value contains one or more of these terms, the key or value will be moved
-// into the body and assigned to a new variable. The new variable will replace
-// the key or value in the head.
-//
-// For instance, given the following rule:
-//
-//	p[{"foo": data.foo[i]}] { i < 100 }
-//
-// The rule would be re-written as:
-//
-//	p[__local0__] { i < 100; __local0__ = {"foo": data.foo[i]} }
-func (c *Compiler) rewriteRefsInHead() {
-	f := newEqualityFactory(c.localvargen)
-	for _, name := range c.sorted {
-		mod := c.Modules[name]
-		WalkRules(mod, func(rule *Rule) bool {
-			if requiresEval(rule.Head.Key) {
-				expr := f.Generate(rule.Head.Key)
-				rule.Head.Key = expr.Operand(0)
-				rule.Body.Append(expr)
-			}
-			if requiresEval(rule.Head.Value) {
-				expr := f.Generate(rule.Head.Value)
-				rule.Head.Value = expr.Operand(0)
-				rule.Body.Append(expr)
-			}
-			for i := 0; i < len(rule.Head.Args); i++ {
-				if requiresEval(rule.Head.Args[i]) {
-					expr := f.Generate(rule.Head.Args[i])
-					rule.Head.Args[i] = expr.Operand(0)
-					rule.Body.Append(expr)
-				}
-			}
-			return false
-		})
-	}
-}
-
-func (c *Compiler) rewriteEquals() {
-	modified := false
-	for _, name := range c.sorted {
-		mod := c.Modules[name]
-		modified = rewriteEquals(mod) || modified
-	}
-	if modified {
-		c.Required.addBuiltinSorted(Equal)
-	}
-}
-
-func (c *Compiler) rewriteDynamicTerms() {
-	f := newEqualityFactory(c.localvargen)
-	for _, name := range c.sorted {
-		mod := c.Modules[name]
-		WalkRules(mod, func(rule *Rule) bool {
-			rule.Body = rewriteDynamics(f, rule.Body)
-			return false
-		})
-	}
-}
-
-// rewriteTestRuleEqualities rewrites equality expressions in test rule bodies to create local vars for statements that would otherwise
-// not have their values captured through tracing, such as refs and comprehensions not unified/assigned to a local var.
-// For example, given the following module:
-//
-//	package test
-//
-//	p.q contains v if {
-//		some v in numbers.range(1, 3)
-//	}
-//
-//	p.r := "foo"
-//
-//	test_rule {
-//		p == {
-//			"q": {4, 5, 6}
-//		}
-//	}
-//
-// `p` in `test_rule` resolves to `data.test.p`, which won't be an entry in the virtual-cache and must therefore be calculated after-the-fact.
-// If `p` isn't captured in a local var, there is no trivial way to retrieve its value for test reporting.
-func (c *Compiler) rewriteTestRuleEqualities() {
-	if !c.rewriteTestRulesForTracing {
-		return
-	}
-
-	f := newEqualityFactory(c.localvargen)
-	for _, name := range c.sorted {
-		mod := c.Modules[name]
-		WalkRules(mod, func(rule *Rule) bool {
-			if strings.HasPrefix(string(rule.Head.Name), "test_") {
-				rule.Body = rewriteTestEqualities(f, rule.Body)
-			}
-			return false
-		})
-	}
-}
-
-func (c *Compiler) parseMetadataBlocks() {
-	// Only parse annotations if rego.metadata built-ins are called
-	regoMetadataCalled := false
-	for _, name := range c.sorted {
-		mod := c.Modules[name]
-		WalkExprs(mod, func(expr *Expr) bool {
-			if isRegoMetadataChainCall(expr) || isRegoMetadataRuleCall(expr) {
-				regoMetadataCalled = true
-			}
-			return regoMetadataCalled
-		})
-
-		if regoMetadataCalled {
-			break
-		}
-	}
-
-	if regoMetadataCalled {
-		// NOTE: Possible optimization: only parse annotations for modules on the path of rego.metadata-calling module
-		for _, name := range c.sorted {
-			mod := c.Modules[name]
-
-			if len(mod.Annotations) == 0 {
-				var errs Errors
-				mod.Annotations, errs = parseAnnotations(mod.Comments)
-				errs = append(errs, attachAnnotationsNodes(mod)...)
-				for _, err := range errs {
-					c.err(err)
-				}
-
-				attachRuleAnnotations(mod)
-			}
-		}
-	}
-}
-
-func (c *Compiler) rewriteRegoMetadataCalls() {
-	eqFactory := newEqualityFactory(c.localvargen)
-
-	_, chainFuncAllowed := c.builtins[RegoMetadataChain.Name]
-	_, ruleFuncAllowed := c.builtins[RegoMetadataRule.Name]
-
-	for _, name := range c.sorted {
-		mod := c.Modules[name]
-
-		WalkRules(mod, func(rule *Rule) bool {
-			var firstChainCall *Expr
-			var firstRuleCall *Expr
-
-			WalkExprs(rule, func(expr *Expr) bool {
-				if chainFuncAllowed && firstChainCall == nil && isRegoMetadataChainCall(expr) {
-					firstChainCall = expr
-				} else if ruleFuncAllowed && firstRuleCall == nil && isRegoMetadataRuleCall(expr) {
-					firstRuleCall = expr
-				}
-				return firstChainCall != nil && firstRuleCall != nil
-			})
-
-			chainCalled := firstChainCall != nil
-			ruleCalled := firstRuleCall != nil
-
-			if chainCalled || ruleCalled {
-				body := make(Body, 0, len(rule.Body)+2)
-
-				var metadataChainVar Var
-				if chainCalled {
-					// Create and inject metadata chain for rule
-
-					chain, err := createMetadataChain(c.annotationSet.Chain(rule))
-					if err != nil {
-						c.err(err)
-						return false
-					}
-
-					chain.Location = firstChainCall.Location
-					eq := eqFactory.Generate(chain)
-					metadataChainVar = eq.Operands()[0].Value.(Var)
-					body.Append(eq)
-				}
-
-				var metadataRuleVar Var
-				if ruleCalled {
-					// Create and inject metadata for rule
-
-					var metadataRuleTerm *Term
-
-					a := getPrimaryRuleAnnotations(c.annotationSet, rule)
-					if a != nil {
-						annotObj, err := a.toObject()
-						if err != nil {
-							c.err(err)
-							return false
-						}
-						metadataRuleTerm = NewTerm(*annotObj)
-					} else {
-						// If rule has no annotations, assign an empty object
-						metadataRuleTerm = ObjectTerm()
-					}
-
-					metadataRuleTerm.Location = firstRuleCall.Location
-					eq := eqFactory.Generate(metadataRuleTerm)
-					metadataRuleVar = eq.Operands()[0].Value.(Var)
-					body.Append(eq)
-				}
-
-				for _, expr := range rule.Body {
-					body.Append(expr)
-				}
-				rule.Body = body
-
-				vis := func(b Body) bool {
-					for _, err := range rewriteRegoMetadataCalls(&metadataChainVar, &metadataRuleVar, b, &c.RewrittenVars) {
-						c.err(err)
-					}
-					return false
-				}
-				WalkBodies(rule.Head, vis)
-				WalkBodies(rule.Body, vis)
-			}
-
-			return false
-		})
-	}
-}
-
-func getPrimaryRuleAnnotations(as *AnnotationSet, rule *Rule) *Annotations {
-	annots := as.GetRuleScope(rule)
-
-	if len(annots) == 0 {
-		return nil
-	}
-
-	// Sort by annotation location; chain must start with annotations declared closest to rule, then going outward
-	sort.SliceStable(annots, func(i, j int) bool {
-		return annots[i].Location.Compare(annots[j].Location) > 0
-	})
-
-	return annots[0]
-}
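The two deleted stages above cooperate: parseMetadataBlocks lazily parses annotations from module comments when a rego.metadata built-in is called, and rewriteRegoMetadataCalls replaces each call with a generated local bound to the annotation object. A sketch, not part of the patch, assuming the parser retains the METADATA comment block for the lazy annotation pass:

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	c := ast.NewCompiler()
	c.Compile(map[string]*ast.Module{
		"meta.rego": ast.MustParseModule(`package demo

import rego.v1

# METADATA
# title: deny everything
deny if rego.metadata.rule().title == "deny everything"
`),
	})
	if c.Failed() {
		panic(c.Errors)
	}
	// The rego.metadata.rule() call is gone from the compiled body; its result
	// is a generated local holding the rule's annotation object.
	fmt.Println(c.Modules["meta.rego"].Rules[0].Body)
}
```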
-
-func rewriteRegoMetadataCalls(metadataChainVar *Var, metadataRuleVar *Var, body Body, rewrittenVars *map[Var]Var) Errors {
-	var errs Errors
-
-	WalkClosures(body, func(x interface{}) bool {
-		switch x := x.(type) {
-		case *ArrayComprehension:
-			errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars)
-		case *SetComprehension:
-			errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars)
-		case *ObjectComprehension:
-			errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars)
-		case *Every:
-			errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars)
-		}
-		return true
-	})
-
-	for i := range body {
-		expr := body[i]
-		var metadataVar Var
-
-		if metadataChainVar != nil && isRegoMetadataChainCall(expr) {
-			metadataVar = *metadataChainVar
-		} else if metadataRuleVar != nil && isRegoMetadataRuleCall(expr) {
-			metadataVar = *metadataRuleVar
-		} else {
-			continue
-		}
-
-		// NOTE(johanfylling): An alternative strategy would be to walk the body and replace all operands[0]
-		// usages with *metadataChainVar
-		operands := expr.Operands()
-		var newExpr *Expr
-		if len(operands) > 0 { // There is an output var to rewrite
-			rewrittenVar := operands[0]
-			newExpr = Equality.Expr(rewrittenVar, NewTerm(metadataVar))
-		} else { // No output var, just rewrite expr to metadataVar
-			newExpr = NewExpr(NewTerm(metadataVar))
-		}
-
-		newExpr.Generated = true
-		newExpr.Location = expr.Location
-		body.Set(newExpr, i)
-	}
-
-	return errs
-}
-
-func isRegoMetadataChainCall(x *Expr) bool {
-	return x.IsCall() && x.Operator().Equal(RegoMetadataChain.Ref())
-}
-
-func isRegoMetadataRuleCall(x *Expr) bool {
-	return x.IsCall() && x.Operator().Equal(RegoMetadataRule.Ref())
-}
-
-func createMetadataChain(chain []*AnnotationsRef) (*Term, *Error) {
-
-	metaArray := NewArray()
-	for _, link := range chain {
-		p := link.Path.toArray().
-			Slice(1, -1) // Dropping leading 'data' element of path
-		obj := NewObject(
-			Item(StringTerm("path"), NewTerm(p)),
-		)
-		if link.Annotations != nil {
-			annotObj, err := link.Annotations.toObject()
-			if err != nil {
-				return nil, err
-			}
-			obj.Insert(StringTerm("annotations"), NewTerm(*annotObj))
-		}
-		metaArray = metaArray.Append(NewTerm(obj))
-	}
-
-	return NewTerm(metaArray), nil
-}
-
-func (c *Compiler) rewriteLocalVars() {
-
-	var assignment bool
-
-	for _, name := range c.sorted {
-		mod := c.Modules[name]
-		gen := c.localvargen
-
-		WalkRules(mod, func(rule *Rule) bool {
-			argsStack := newLocalDeclaredVars()
-
-			args := NewVarVisitor()
-			if c.strict {
-				args.Walk(rule.Head.Args)
-			}
-			unusedArgs := args.Vars()
-
-			c.rewriteLocalArgVars(gen, argsStack, rule)
-
-			// Rewrite local vars in each else-branch of the rule.
-			// Note: this is done instead of a walk so that we can capture any unused function arguments
-			// across else-branches.
-			for rule := rule; rule != nil; rule = rule.Else {
-				stack, errs := c.rewriteLocalVarsInRule(rule, unusedArgs, argsStack, gen)
-				if stack.assignment {
-					assignment = true
-				}
-
-				for arg := range unusedArgs {
-					if stack.Count(arg) > 1 {
-						delete(unusedArgs, arg)
-					}
-				}
-
-				for _, err := range errs {
-					c.err(err)
-				}
-			}
-
-			if c.strict {
-				// Report an error for each unused function argument
-				for arg := range unusedArgs {
-					if !arg.IsWildcard() {
-						c.err(NewError(CompileErr, rule.Head.Location, "unused argument %v. (hint: use _ (wildcard variable) instead)", arg))
-					}
-				}
-			}
-
-			return true
-		})
-	}
-
-	if assignment {
-		c.Required.addBuiltinSorted(Assign)
-	}
-}
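The strict-mode unused-argument check in the deleted rewriteLocalVars above is easy to trigger. A sketch, not part of the patch:

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	c := ast.NewCompiler().WithStrict(true)
	c.Compile(map[string]*ast.Module{
		"args.rego": ast.MustParseModule(`package demo

import rego.v1

f(x, y) := x + 1
`),
	})
	fmt.Println(c.Errors) // "unused argument y. (hint: use _ (wildcard variable) instead)"
}
```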
-
-func (c *Compiler) rewriteLocalVarsInRule(rule *Rule, unusedArgs VarSet, argsStack *localDeclaredVars, gen *localVarGenerator) (*localDeclaredVars, Errors) {
-	// Rewrite assignments contained in head of rule. Assignments can
-	// occur in rule head if they're inside a comprehension. Note,
-	// assigned vars in comprehensions in the head will be rewritten
-	// first to preserve scoping rules. For example:
-	//
-	//	p = [x | x := 1] { x := 2 } becomes p = [__local0__ | __local0__ = 1] { __local1__ = 2 }
-	//
-	// This behaviour is consistent scoping inside the body. For example:
-	//
-	//	p = xs { x := 2; xs = [x | x := 1] } becomes p = xs { __local0__ = 2; xs = [__local1__ | __local1__ = 1] }
-	nestedXform := &rewriteNestedHeadVarLocalTransform{
-		gen:           gen,
-		RewrittenVars: c.RewrittenVars,
-		strict:        c.strict,
-	}
-
-	NewGenericVisitor(nestedXform.Visit).Walk(rule.Head)
-
-	for _, err := range nestedXform.errs {
-		c.err(err)
-	}
-
-	// Rewrite assignments in body.
-	used := NewVarSet()
-
-	for _, t := range rule.Head.Ref()[1:] {
-		used.Update(t.Vars())
-	}
-
-	if rule.Head.Key != nil {
-		used.Update(rule.Head.Key.Vars())
-	}
-
-	if rule.Head.Value != nil {
-		valueVars := rule.Head.Value.Vars()
-		used.Update(valueVars)
-		for arg := range unusedArgs {
-			if valueVars.Contains(arg) {
-				delete(unusedArgs, arg)
-			}
-		}
-	}
-
-	stack := argsStack.Copy()
-
-	body, declared, errs := rewriteLocalVars(gen, stack, used, rule.Body, c.strict)
-
-	// For rewritten vars use the collection of all variables that
-	// were in the stack at some point in time.
-	for k, v := range stack.rewritten {
-		c.RewrittenVars[k] = v
-	}
-
-	rule.Body = body
-
-	// Rewrite vars in head that refer to locally declared vars in the body.
-	localXform := rewriteHeadVarLocalTransform{declared: declared}
-
-	for i := range rule.Head.Args {
-		rule.Head.Args[i], _ = transformTerm(localXform, rule.Head.Args[i])
-	}
-
-	for i := 1; i < len(rule.Head.Ref()); i++ {
-		rule.Head.Reference[i], _ = transformTerm(localXform, rule.Head.Ref()[i])
-	}
-	if rule.Head.Key != nil {
-		rule.Head.Key, _ = transformTerm(localXform, rule.Head.Key)
-	}
-
-	if rule.Head.Value != nil {
-		rule.Head.Value, _ = transformTerm(localXform, rule.Head.Value)
-	}
-	return stack, errs
-}
-
-type rewriteNestedHeadVarLocalTransform struct {
-	gen           *localVarGenerator
-	errs          Errors
-	RewrittenVars map[Var]Var
-	strict        bool
-}
-
-func (xform *rewriteNestedHeadVarLocalTransform) Visit(x interface{}) bool {
-
-	if term, ok := x.(*Term); ok {
-
-		stop := false
-		stack := newLocalDeclaredVars()
-
-		switch x := term.Value.(type) {
-		case *object:
-			cpy, _ := x.Map(func(k, v *Term) (*Term, *Term, error) {
-				kcpy := k.Copy()
-				NewGenericVisitor(xform.Visit).Walk(kcpy)
-				vcpy := v.Copy()
-				NewGenericVisitor(xform.Visit).Walk(vcpy)
-				return kcpy, vcpy, nil
-			})
-			term.Value = cpy
-			stop = true
-		case *set:
-			cpy, _ := x.Map(func(v *Term) (*Term, error) {
-				vcpy := v.Copy()
-				NewGenericVisitor(xform.Visit).Walk(vcpy)
-				return vcpy, nil
-			})
-			term.Value = cpy
-			stop = true
-		case *ArrayComprehension:
-			xform.errs = rewriteDeclaredVarsInArrayComprehension(xform.gen, stack, x, xform.errs, xform.strict)
-			stop = true
-		case *SetComprehension:
-			xform.errs = rewriteDeclaredVarsInSetComprehension(xform.gen, stack, x, xform.errs, xform.strict)
-			stop = true
-		case *ObjectComprehension:
-			xform.errs = rewriteDeclaredVarsInObjectComprehension(xform.gen, stack, x, xform.errs, xform.strict)
-			stop = true
-		}
-
-		for k, v := range stack.rewritten {
-			xform.RewrittenVars[k] = v
-		}
-
-		return stop
-	}
-
-	return false
-}
-
-type rewriteHeadVarLocalTransform struct {
-	declared map[Var]Var
-}
-
-func (xform rewriteHeadVarLocalTransform) Transform(x interface{}) (interface{}, error) {
-	if v, ok := x.(Var); ok {
-		if gv, ok := xform.declared[v]; ok {
-			return gv, nil
-		}
-	}
-	return x, nil
-}
-
-func (c *Compiler) rewriteLocalArgVars(gen *localVarGenerator, stack *localDeclaredVars, rule *Rule) {
-
-	vis := &ruleArgLocalRewriter{
-		stack: stack,
-		gen:   gen,
-	}
-
-	for i := range rule.Head.Args {
-		Walk(vis, rule.Head.Args[i])
-	}
-
-	for i := range vis.errs {
-		c.err(vis.errs[i])
-	}
-}
-
-type ruleArgLocalRewriter struct {
-	stack *localDeclaredVars
-	gen   *localVarGenerator
-	errs  []*Error
-}
-
-func (vis *ruleArgLocalRewriter) Visit(x interface{}) Visitor {
-
-	t, ok := x.(*Term)
-	if !ok {
-		return vis
-	}
-
-	switch v := t.Value.(type) {
-	case Var:
-		gv, ok := vis.stack.Declared(v)
-		if ok {
-			vis.stack.Seen(v)
-		} else {
-			gv = vis.gen.Generate()
-			vis.stack.Insert(v, gv, argVar)
-		}
-		t.Value = gv
-		return nil
-	case *object:
-		if cpy, err := v.Map(func(k, v *Term) (*Term, *Term, error) {
-			vcpy := v.Copy()
-			Walk(vis, vcpy)
-			return k, vcpy, nil
-		}); err != nil {
-			vis.errs = append(vis.errs, NewError(CompileErr, t.Location, err.Error()))
-		} else {
-			t.Value = cpy
-		}
-		return nil
-	case Null, Boolean, Number, String, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Set:
-		// Scalars are no-ops. Comprehensions are handled above. Sets must not
-		// contain variables.
-		return nil
-	case Call:
-		vis.errs = append(vis.errs, NewError(CompileErr, t.Location, "rule arguments cannot contain calls"))
-		return nil
-	default:
-		// Recurse on refs and arrays. Any embedded
-		// variables can be rewritten.
-		return vis
-	}
-}
-
-func (c *Compiler) rewriteWithModifiers() {
-	f := newEqualityFactory(c.localvargen)
-	for _, name := range c.sorted {
-		mod := c.Modules[name]
-		t := NewGenericTransformer(func(x interface{}) (interface{}, error) {
-			body, ok := x.(Body)
-			if !ok {
-				return x, nil
-			}
-			body, err := rewriteWithModifiersInBody(c, c.unsafeBuiltinsMap, f, body)
-			if err != nil {
-				c.err(err)
-			}
-
-			return body, nil
-		})
-		_, _ = Transform(t, mod) // ignore error
-	}
-}
-
-func (c *Compiler) setModuleTree() {
-	c.ModuleTree = NewModuleTree(c.Modules)
-}
-
-func (c *Compiler) setRuleTree() {
-	c.RuleTree = NewRuleTree(c.ModuleTree)
-}
-
-func (c *Compiler) setGraph() {
-	list := func(r Ref) []*Rule {
-		return c.GetRulesDynamicWithOpts(r, RulesOptions{IncludeHiddenModules: true})
-	}
-	c.Graph = NewGraph(c.Modules, list)
-}
-
-type queryCompiler struct {
-	compiler              *Compiler
-	qctx                  *QueryContext
-	typeEnv               *TypeEnv
-	rewritten             map[Var]Var
-	after                 map[string][]QueryCompilerStageDefinition
-	unsafeBuiltins        map[string]struct{}
-	comprehensionIndices  map[*Term]*ComprehensionIndex
-	enablePrintStatements bool
-}
-
-func newQueryCompiler(compiler *Compiler) QueryCompiler {
-	qc := &queryCompiler{
-		compiler:             compiler,
-		qctx:                 nil,
-		after:                map[string][]QueryCompilerStageDefinition{},
-		comprehensionIndices: map[*Term]*ComprehensionIndex{},
-	}
-	return qc
-}
-
-func (qc *queryCompiler) WithStrict(strict bool) QueryCompiler {
-	qc.compiler.WithStrict(strict)
-	return qc
-}
-
-func (qc *queryCompiler) WithEnablePrintStatements(yes bool) QueryCompiler {
-	qc.enablePrintStatements = yes
-	return qc
-}
-
-func (qc *queryCompiler) WithContext(qctx *QueryContext) QueryCompiler {
-	qc.qctx = qctx
-	return qc
-}
-
-func (qc *queryCompiler) WithStageAfter(after string, stage QueryCompilerStageDefinition) QueryCompiler {
-	qc.after[after] = append(qc.after[after], stage)
-	return qc
-}
-
-func (qc *queryCompiler) WithUnsafeBuiltins(unsafe map[string]struct{}) QueryCompiler {
-	qc.unsafeBuiltins = unsafe
-	return qc
-}
-
-func (qc *queryCompiler) RewrittenVars() map[Var]Var {
-	return qc.rewritten
-}
-
-func (qc *queryCompiler) ComprehensionIndex(term *Term) *ComprehensionIndex {
-	if result, ok := qc.comprehensionIndices[term]; ok {
-		return result
-	} else if result, ok := qc.compiler.comprehensionIndices[term]; ok {
-		return result
-	}
-	return nil
-}
-
-func (qc *queryCompiler) runStage(metricName string, qctx *QueryContext, query Body, s func(*QueryContext, Body) (Body, error)) (Body, error) {
-	if qc.compiler.metrics != nil {
-		qc.compiler.metrics.Timer(metricName).Start()
-		defer qc.compiler.metrics.Timer(metricName).Stop()
-	}
-	return s(qctx, query)
-}
-
-func (qc *queryCompiler) runStageAfter(metricName string, query Body, s QueryCompilerStage) (Body, error) {
-	if qc.compiler.metrics != nil {
-		qc.compiler.metrics.Timer(metricName).Start()
-		defer qc.compiler.metrics.Timer(metricName).Stop()
-	}
-	return s(qc, query)
-}
-
-type queryStage = struct {
-	name       string
-	metricName string
-	f          func(*QueryContext, Body) (Body, error)
-}
-
-func (qc *queryCompiler) Compile(query Body) (Body, error) {
-	if len(query) == 0 {
-		return nil, Errors{NewError(CompileErr, nil, "empty query cannot be compiled")}
-	}
-
-	query = query.Copy()
-
-	stages := []queryStage{
-		{"CheckKeywordOverrides", "query_compile_stage_check_keyword_overrides", qc.checkKeywordOverrides},
-		{"ResolveRefs", "query_compile_stage_resolve_refs", qc.resolveRefs},
-		{"RewriteLocalVars", "query_compile_stage_rewrite_local_vars", qc.rewriteLocalVars},
-		{"CheckVoidCalls", "query_compile_stage_check_void_calls", qc.checkVoidCalls},
-		{"RewritePrintCalls", "query_compile_stage_rewrite_print_calls", qc.rewritePrintCalls},
-		{"RewriteExprTerms", "query_compile_stage_rewrite_expr_terms", qc.rewriteExprTerms},
-		{"RewriteComprehensionTerms", "query_compile_stage_rewrite_comprehension_terms", qc.rewriteComprehensionTerms},
-		{"RewriteWithValues", "query_compile_stage_rewrite_with_values", qc.rewriteWithModifiers},
-		{"CheckUndefinedFuncs", "query_compile_stage_check_undefined_funcs", qc.checkUndefinedFuncs},
-		{"CheckSafety", "query_compile_stage_check_safety", qc.checkSafety},
-		{"RewriteDynamicTerms", "query_compile_stage_rewrite_dynamic_terms", qc.rewriteDynamicTerms},
-		{"CheckTypes", "query_compile_stage_check_types", qc.checkTypes},
-		{"CheckUnsafeBuiltins", "query_compile_stage_check_unsafe_builtins", qc.checkUnsafeBuiltins},
-		{"CheckDeprecatedBuiltins", "query_compile_stage_check_deprecated_builtins", qc.checkDeprecatedBuiltins},
-	}
-	if qc.compiler.evalMode == EvalModeTopdown {
-		stages = append(stages, queryStage{"BuildComprehensionIndex", "query_compile_stage_build_comprehension_index", qc.buildComprehensionIndices})
-	}
-
-	qctx := qc.qctx.Copy()
-
-	for _, s := range stages {
-		var err error
-		query, err = qc.runStage(s.metricName, qctx, query, s.f)
-		if err != nil {
-			return nil, qc.applyErrorLimit(err)
-		}
-		for _, s := range qc.after[s.name] {
-			query, err = qc.runStageAfter(s.MetricName, query, s.Stage)
-			if err != nil {
-				return nil, qc.applyErrorLimit(err)
-			}
-		}
-	}
-
-	return query, nil
-}
-
-func (qc *queryCompiler) TypeEnv() *TypeEnv {
-	return qc.typeEnv
-}
-
-func (qc *queryCompiler) applyErrorLimit(err error) error {
-	var errs Errors
-	if errors.As(err, &errs) {
-		if qc.compiler.maxErrs > 0 && len(errs) > qc.compiler.maxErrs {
-			err = append(errs[:qc.compiler.maxErrs], errLimitReached)
-		}
-	}
-	return err
-}
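The deleted Compile method runs the listed stages over a copy of the query body. A sketch of driving it through the public API, not part of the patch; the query and module are illustrative:

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	c := ast.NewCompiler()
	c.Compile(map[string]*ast.Module{
		"q.rego": ast.MustParseModule("package demo\n\nimport rego.v1\n\np if input.x > 1"),
	})
	if c.Failed() {
		panic(c.Errors)
	}

	qc := c.QueryCompiler()
	compiled, err := qc.Compile(ast.MustParseBody("x := 1; data.demo.p"))
	if err != nil {
		panic(err)
	}
	// x has been rewritten to a generated local; RewrittenVars retains the
	// mapping so callers can translate result vars back to their user names.
	fmt.Println(compiled, qc.RewrittenVars())
}
```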
package if one was not provided and one or more imports were provided. - // The generated package name could even be an empty string to avoid conflicts (it doesn't have to be valid syntactically) - if pkg == nil && len(qctx.Imports) > 0 { - pkg = &Package{Path: RefTerm(VarTerm("")).Value.(Ref)} - } - if pkg != nil { - var ruleExports []Ref - rules := qc.compiler.getExports() - if exist, ok := rules.Get(pkg.Path); ok { - ruleExports = exist.([]Ref) - } - - globals = getGlobals(qctx.Package, ruleExports, qctx.Imports) - qctx.Imports = nil - } - } - - ignore := &declaredVarStack{declaredVars(body)} - - return resolveRefsInBody(globals, ignore, body), nil -} - -func (qc *queryCompiler) rewriteComprehensionTerms(_ *QueryContext, body Body) (Body, error) { - gen := newLocalVarGenerator("q", body) - f := newEqualityFactory(gen) - node, err := rewriteComprehensionTerms(f, body) - if err != nil { - return nil, err - } - return node.(Body), nil -} - -func (qc *queryCompiler) rewriteDynamicTerms(_ *QueryContext, body Body) (Body, error) { - gen := newLocalVarGenerator("q", body) - f := newEqualityFactory(gen) - return rewriteDynamics(f, body), nil -} - -func (qc *queryCompiler) rewriteExprTerms(_ *QueryContext, body Body) (Body, error) { - gen := newLocalVarGenerator("q", body) - return rewriteExprTermsInBody(gen, body), nil -} - -func (qc *queryCompiler) rewriteLocalVars(_ *QueryContext, body Body) (Body, error) { - gen := newLocalVarGenerator("q", body) - stack := newLocalDeclaredVars() - body, _, err := rewriteLocalVars(gen, stack, nil, body, qc.compiler.strict) - if len(err) != 0 { - return nil, err - } - qc.rewritten = make(map[Var]Var, len(stack.rewritten)) - for k, v := range stack.rewritten { - // The vars returned during the rewrite will include all seen vars, - // even if they're not declared with an assignment operation. We don't - // want to include these inside the rewritten set though. - qc.rewritten[k] = v - } - return body, nil -} - -func (qc *queryCompiler) rewritePrintCalls(_ *QueryContext, body Body) (Body, error) { - if !qc.enablePrintStatements { - _, cpy := erasePrintCallsInBody(body) - return cpy, nil - } - gen := newLocalVarGenerator("q", body) - if _, errs := rewritePrintCalls(gen, qc.compiler.GetArity, ReservedVars, body); len(errs) > 0 { - return nil, errs - } - return body, nil -} - -func (qc *queryCompiler) checkVoidCalls(_ *QueryContext, body Body) (Body, error) { - if errs := checkVoidCalls(qc.compiler.TypeEnv, body); len(errs) > 0 { - return nil, errs - } - return body, nil -} - -func (qc *queryCompiler) checkUndefinedFuncs(_ *QueryContext, body Body) (Body, error) { - if errs := checkUndefinedFuncs(qc.compiler.TypeEnv, body, qc.compiler.GetArity, qc.rewritten); len(errs) > 0 { - return nil, errs - } - return body, nil -} - -func (qc *queryCompiler) checkSafety(_ *QueryContext, body Body) (Body, error) { - safe := ReservedVars.Copy() - reordered, unsafe := reorderBodyForSafety(qc.compiler.builtins, qc.compiler.GetArity, safe, body) - if errs := safetyErrorSlice(unsafe, qc.RewrittenVars()); len(errs) > 0 { - return nil, errs - } - return reordered, nil -} - -func (qc *queryCompiler) checkTypes(_ *QueryContext, body Body) (Body, error) { - var errs Errors - checker := newTypeChecker(). - WithSchemaSet(qc.compiler.schemaSet). - WithInputType(qc.compiler.inputType). 
- WithVarRewriter(rewriteVarsInRef(qc.rewritten, qc.compiler.RewrittenVars)) - qc.typeEnv, errs = checker.CheckBody(qc.compiler.TypeEnv, body) - if len(errs) > 0 { - return nil, errs - } - - return body, nil -} - -func (qc *queryCompiler) checkUnsafeBuiltins(_ *QueryContext, body Body) (Body, error) { - errs := checkUnsafeBuiltins(qc.unsafeBuiltinsMap(), body) - if len(errs) > 0 { - return nil, errs - } - return body, nil -} - -func (qc *queryCompiler) unsafeBuiltinsMap() map[string]struct{} { - if qc.unsafeBuiltins != nil { - return qc.unsafeBuiltins - } - return qc.compiler.unsafeBuiltinsMap -} - -func (qc *queryCompiler) checkDeprecatedBuiltins(_ *QueryContext, body Body) (Body, error) { - if qc.compiler.strict { - errs := checkDeprecatedBuiltins(qc.compiler.deprecatedBuiltinsMap, body) - if len(errs) > 0 { - return nil, errs - } - } - return body, nil -} - -func (qc *queryCompiler) rewriteWithModifiers(_ *QueryContext, body Body) (Body, error) { - f := newEqualityFactory(newLocalVarGenerator("q", body)) - body, err := rewriteWithModifiersInBody(qc.compiler, qc.unsafeBuiltinsMap(), f, body) - if err != nil { - return nil, Errors{err} - } - return body, nil -} - -func (qc *queryCompiler) buildComprehensionIndices(_ *QueryContext, body Body) (Body, error) { - // NOTE(tsandall): The query compiler does not have a metrics object so we - // cannot record index metrics currently. - _ = buildComprehensionIndices(qc.compiler.debug, qc.compiler.GetArity, ReservedVars, qc.RewrittenVars(), body, qc.comprehensionIndices) - return body, nil -} - -// ComprehensionIndex specifies how the comprehension term can be indexed. The keys -// tell the evaluator what variables to use for indexing. In the future, the index -// could be expanded with more information that would allow the evaluator to index -// a larger fragment of comprehensions (e.g., by closing over variables in the outer -// query.) -type ComprehensionIndex struct { - Term *Term - Keys []*Term -} - -func (ci *ComprehensionIndex) String() string { - if ci == nil { - return "<nil>" - } - return fmt.Sprintf("<comprehension index: keys: %v>", NewArray(ci.Keys...)) -} - -func buildComprehensionIndices(dbg debug.Debug, arity func(Ref) int, candidates VarSet, rwVars map[Var]Var, node interface{}, result map[*Term]*ComprehensionIndex) uint64 { - var n uint64 - cpy := candidates.Copy() - WalkBodies(node, func(b Body) bool { - for _, expr := range b { - index := getComprehensionIndex(dbg, arity, cpy, rwVars, expr) - if index != nil { - result[index.Term] = index - n++ - } - // Any variables appearing in the expressions leading up to the comprehension - // are fair game to be used as index keys. - cpy.Update(expr.Vars(VarVisitorParams{SkipClosures: true, SkipRefCallHead: true})) - } - return false - }) - return n -} - -func getComprehensionIndex(dbg debug.Debug, arity func(Ref) int, candidates VarSet, rwVars map[Var]Var, expr *Expr) *ComprehensionIndex { - - // Ignore everything except = expressions. Extract - // the comprehension term from the expression. - if !expr.IsEquality() || expr.Negated || len(expr.With) > 0 { - // No debug message, these are assumed to be known hindrances - // to comprehension indexing.
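- // For example, 'not x = [y | y := input[_]]', or an equality carrying a - // 'with' modifier, is skipped outright.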
- return nil - } - - var term *Term - - lhs, rhs := expr.Operand(0), expr.Operand(1) - - if _, ok := lhs.Value.(Var); ok && IsComprehension(rhs.Value) { - term = rhs - } else if _, ok := rhs.Value.(Var); ok && IsComprehension(lhs.Value) { - term = lhs - } - - if term == nil { - // no debug for this, it's the ordinary "nothing to do here" case - return nil - } - - // Ignore comprehensions that contain expressions that close over variables - // in the outer body if those variables are not also output variables in the - // comprehension body. In other words, ignore comprehensions that we cannot - // safely evaluate without bindings from the outer body. For example: - // - // x = [1] - // [true | data.y[z] = x] # safe to evaluate w/o outer body - // [true | data.y[z] = x[0]] # NOT safe to evaluate because 'x' would be unsafe. - // - // By identifying output variables in the body we also know what to index on by - // intersecting with candidate variables from the outer query. - // - // For example: - // - // x = data.foo[_] - // _ = [y | data.bar[y] = x] # index on 'x' - // - // This query goes from O(data.foo*data.bar) to O(data.foo+data.bar). - var body Body - - switch x := term.Value.(type) { - case *ArrayComprehension: - body = x.Body - case *SetComprehension: - body = x.Body - case *ObjectComprehension: - body = x.Body - } - - outputs := outputVarsForBody(body, arity, ReservedVars) - unsafe := body.Vars(SafetyCheckVisitorParams).Diff(outputs).Diff(ReservedVars) - - if len(unsafe) > 0 { - dbg.Printf("%s: comprehension index: unsafe vars: %v", expr.Location, unsafe) - return nil - } - - // Similarly, ignore comprehensions that contain references with output variables - // that intersect with the candidates. Indexing these comprehensions could worsen - // performance. - regressionVis := newComprehensionIndexRegressionCheckVisitor(candidates) - regressionVis.Walk(body) - if regressionVis.worse { - dbg.Printf("%s: comprehension index: output vars intersect candidates", expr.Location) - return nil - } - - // Check if any nested comprehensions close over candidates. If any intersection is found - // the comprehension cannot be cached because it would require closing over the candidates - // which the evaluator does not support today. - nestedVis := newComprehensionIndexNestedCandidateVisitor(candidates) - nestedVis.Walk(body) - if nestedVis.found { - dbg.Printf("%s: comprehension index: nested comprehensions close over candidates", expr.Location) - return nil - } - - // Make a sorted set of variable names that will serve as the index key set. - // Sort to ensure deterministic indexing. In future this could be relaxed - // if we can decide that one ordering is better than another. If the set is - // empty, there is no indexing to do. 
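- // For example, with candidate vars {x, y} from the outer query and - // comprehension output vars {x, z}, the index key set would be [x].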
- indexVars := candidates.Intersect(outputs) - if len(indexVars) == 0 { - dbg.Printf("%s: comprehension index: no index vars", expr.Location) - return nil - } - - result := make([]*Term, 0, len(indexVars)) - - for v := range indexVars { - result = append(result, NewTerm(v)) - } - - sort.Slice(result, func(i, j int) bool { - return result[i].Value.Compare(result[j].Value) < 0 - }) - - debugRes := make([]*Term, len(result)) - for i, r := range result { - if o, ok := rwVars[r.Value.(Var)]; ok { - debugRes[i] = NewTerm(o) - } else { - debugRes[i] = r - } - } - dbg.Printf("%s: comprehension index: built with keys: %v", expr.Location, debugRes) - return &ComprehensionIndex{Term: term, Keys: result} -} - -type comprehensionIndexRegressionCheckVisitor struct { - candidates VarSet - seen VarSet - worse bool -} - -// TODO(tsandall): Improve this so that users can either supply this list explicitly -// or have the information maintained on the built-in function declaration. What we really -// need to know is whether the built-in function allows callers to push down output -// values or not. It's unlikely that anything outside of OPA does this today so this -// solution is fine for now. -var comprehensionIndexBlacklist = map[string]int{ - WalkBuiltin.Name: len(WalkBuiltin.Decl.FuncArgs().Args), -} - -func newComprehensionIndexRegressionCheckVisitor(candidates VarSet) *comprehensionIndexRegressionCheckVisitor { - return &comprehensionIndexRegressionCheckVisitor{ - candidates: candidates, - seen: NewVarSet(), - } -} - -func (vis *comprehensionIndexRegressionCheckVisitor) Walk(x interface{}) { - NewGenericVisitor(vis.visit).Walk(x) -} - -func (vis *comprehensionIndexRegressionCheckVisitor) visit(x interface{}) bool { - if !vis.worse { - switch x := x.(type) { - case *Expr: - operands := x.Operands() - if pos := comprehensionIndexBlacklist[x.Operator().String()]; pos > 0 && pos < len(operands) { - vis.assertEmptyIntersection(operands[pos].Vars()) - } - case Ref: - vis.assertEmptyIntersection(x.OutputVars()) - case Var: - vis.seen.Add(x) - // Always skip comprehensions. We do not have to visit their bodies here. - case *ArrayComprehension, *SetComprehension, *ObjectComprehension: - return true - } - } - return vis.worse -} - -func (vis *comprehensionIndexRegressionCheckVisitor) assertEmptyIntersection(vs VarSet) { - for v := range vs { - if vis.candidates.Contains(v) && !vis.seen.Contains(v) { - vis.worse = true - return - } - } -} - -type comprehensionIndexNestedCandidateVisitor struct { - candidates VarSet - found bool -} - -func newComprehensionIndexNestedCandidateVisitor(candidates VarSet) *comprehensionIndexNestedCandidateVisitor { - return &comprehensionIndexNestedCandidateVisitor{ - candidates: candidates, - } -} - -func (vis *comprehensionIndexNestedCandidateVisitor) Walk(x interface{}) { - NewGenericVisitor(vis.visit).Walk(x) -} - -func (vis *comprehensionIndexNestedCandidateVisitor) visit(x interface{}) bool { - - if vis.found { - return true - } - - if v, ok := x.(Value); ok && IsComprehension(v) { - varVis := NewVarVisitor().WithParams(VarVisitorParams{SkipRefHead: true}) - varVis.Walk(v) - vis.found = len(varVis.Vars().Intersect(vis.candidates)) > 0 - return true - } - - return false -} - -// ModuleTreeNode represents a node in the module tree. The module -// tree is keyed by the package path.
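-// For example, modules with packages a.b and a.c share the nodes for "data" -// and "a", which in turn has separate children "b" and "c".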
-type ModuleTreeNode struct { - Key Value - Modules []*Module - Children map[Value]*ModuleTreeNode - Hide bool -} - -func (n *ModuleTreeNode) String() string { - var rules []string - for _, m := range n.Modules { - for _, r := range m.Rules { - rules = append(rules, r.Head.String()) - } - } - return fmt.Sprintf("<ModuleTreeNode key:%v children:%v rules:%v hide:%v>", n.Key, n.Children, rules, n.Hide) -} - -// NewModuleTree returns a new ModuleTreeNode that represents the root -// of the module tree populated with the given modules. -func NewModuleTree(mods map[string]*Module) *ModuleTreeNode { - root := &ModuleTreeNode{ - Children: map[Value]*ModuleTreeNode{}, - } - names := make([]string, 0, len(mods)) - for name := range mods { - names = append(names, name) - } - sort.Strings(names) - for _, name := range names { - m := mods[name] - node := root - for i, x := range m.Package.Path { - c, ok := node.Children[x.Value] - if !ok { - var hide bool - if i == 1 && x.Value.Compare(SystemDocumentKey) == 0 { - hide = true - } - c = &ModuleTreeNode{ - Key: x.Value, - Children: map[Value]*ModuleTreeNode{}, - Hide: hide, - } - node.Children[x.Value] = c - } - node = c - } - node.Modules = append(node.Modules, m) - } - return root -} - -// Size returns the number of modules in the tree. -func (n *ModuleTreeNode) Size() int { - s := len(n.Modules) - for _, c := range n.Children { - s += c.Size() - } - return s -} - -// child returns n's child with key k. -func (n *ModuleTreeNode) child(k Value) *ModuleTreeNode { - switch k.(type) { - case String, Var: - return n.Children[k] - } - return nil -} - -// find dereferences ref along the tree. ref[0] is converted to a String -// for convenience. -func (n *ModuleTreeNode) find(ref Ref) (*ModuleTreeNode, Ref) { - if v, ok := ref[0].Value.(Var); ok { - ref = Ref{StringTerm(string(v))}.Concat(ref[1:]) - } - node := n - for i, r := range ref { - next := node.child(r.Value) - if next == nil { - tail := make(Ref, len(ref)-i) - tail[0] = VarTerm(string(ref[i].Value.(String))) - copy(tail[1:], ref[i+1:]) - return node, tail - } - node = next - } - return node, nil -} - -// DepthFirst performs a depth-first traversal of the module tree rooted at n. -// If f returns true, traversal will not continue to the children of n. -func (n *ModuleTreeNode) DepthFirst(f func(*ModuleTreeNode) bool) { - if f(n) { - return - } - for _, node := range n.Children { - node.DepthFirst(f) - } -} - -// TreeNode represents a node in the rule tree. The rule tree is keyed by -// rule path. -type TreeNode struct { - Key Value - Values []util.T - Children map[Value]*TreeNode - Sorted []Value - Hide bool -} - -func (n *TreeNode) String() string { - return fmt.Sprintf("<TreeNode key:%v values:%v sorted:%v hide:%v>", n.Key, n.Values, n.Sorted, n.Hide) -} - -// NewRuleTree returns a new TreeNode that represents the root -// of the rule tree populated with the given rules.
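-// For example, a rule whose ref is p.q[x].r is stored under its ground prefix -// p.q, since the prefix ends at the first non-ground term.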
-func NewRuleTree(mtree *ModuleTreeNode) *TreeNode { - root := TreeNode{ - Key: mtree.Key, - } - - mtree.DepthFirst(func(m *ModuleTreeNode) bool { - for _, mod := range m.Modules { - if len(mod.Rules) == 0 { - root.add(mod.Package.Path, nil) - } - for _, rule := range mod.Rules { - root.add(rule.Ref().GroundPrefix(), rule) - } - } - return false - }) - - // ensure that data.system's TreeNode is hidden - node, tail := root.find(DefaultRootRef.Append(NewTerm(SystemDocumentKey))) - if len(tail) == 0 { // found - node.Hide = true - } - - root.DepthFirst(func(x *TreeNode) bool { - x.sort() - return false - }) - - return &root -} - -func (n *TreeNode) add(path Ref, rule *Rule) { - node, tail := n.find(path) - if len(tail) > 0 { - sub := treeNodeFromRef(tail, rule) - if node.Children == nil { - node.Children = make(map[Value]*TreeNode, 1) - } - node.Children[sub.Key] = sub - node.Sorted = append(node.Sorted, sub.Key) - } else { - if rule != nil { - node.Values = append(node.Values, rule) - } - } -} - -// Size returns the number of rules in the tree. -func (n *TreeNode) Size() int { - s := len(n.Values) - for _, c := range n.Children { - s += c.Size() - } - return s -} - -// Child returns n's child with key k. -func (n *TreeNode) Child(k Value) *TreeNode { - switch k.(type) { - case Ref, Call: - return nil - default: - return n.Children[k] - } -} - -// Find dereferences ref along the tree -func (n *TreeNode) Find(ref Ref) *TreeNode { - node := n - for _, r := range ref { - node = node.Child(r.Value) - if node == nil { - return nil - } - } - return node -} - -// Iteratively dereferences ref along the node's subtree. -// - If matching fails immediately, the tail will contain the full ref. -// - Partial matching will result in a tail of non-zero length. -// - A complete match will result in a 0 length tail. -func (n *TreeNode) find(ref Ref) (*TreeNode, Ref) { - node := n - for i := range ref { - next := node.Child(ref[i].Value) - if next == nil { - tail := make(Ref, len(ref)-i) - copy(tail, ref[i:]) - return node, tail - } - node = next - } - return node, nil -} - -// DepthFirst performs a depth-first traversal of the rule tree rooted at n. If -// f returns true, traversal will not continue to the children of n. -func (n *TreeNode) DepthFirst(f func(*TreeNode) bool) { - if f(n) { - return - } - for _, node := range n.Children { - node.DepthFirst(f) - } -} - -func (n *TreeNode) sort() { - sort.Slice(n.Sorted, func(i, j int) bool { - return n.Sorted[i].Compare(n.Sorted[j]) < 0 - }) -} - -func treeNodeFromRef(ref Ref, rule *Rule) *TreeNode { - depth := len(ref) - 1 - key := ref[depth].Value - node := &TreeNode{ - Key: key, - Children: nil, - } - if rule != nil { - node.Values = []util.T{rule} - } - - for i := len(ref) - 2; i >= 0; i-- { - key := ref[i].Value - node = &TreeNode{ - Key: key, - Children: map[Value]*TreeNode{ref[i+1].Value: node}, - Sorted: []Value{ref[i+1].Value}, - } - } - return node -} - -// flattenChildren flattens all children's rule refs into a sorted array. -func (n *TreeNode) flattenChildren() []Ref { - ret := newRefSet() - for _, sub := range n.Children { // we only want the children, so don't use n.DepthFirst() right away - sub.DepthFirst(func(x *TreeNode) bool { - for _, r := range x.Values { - rule := r.(*Rule) - ret.AddPrefix(rule.Ref()) - } - return false - }) - } - - sort.Slice(ret.s, func(i, j int) bool { - return ret.s[i].Compare(ret.s[j]) < 0 - }) - return ret.s -} - -// Graph represents the graph of dependencies between rules. 
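-// For example, if rule p refers to q and q refers to r, the graph records the -// edges p -> q and q -> r, and Sort returns the rules dependencies-first: r, q, p.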
-type Graph struct { - adj map[util.T]map[util.T]struct{} - radj map[util.T]map[util.T]struct{} - nodes map[util.T]struct{} - sorted []util.T -} - -// NewGraph returns a new Graph based on modules. The list function must return -// the rules referred to directly by the ref. -func NewGraph(modules map[string]*Module, list func(Ref) []*Rule) *Graph { - - graph := &Graph{ - adj: map[util.T]map[util.T]struct{}{}, - radj: map[util.T]map[util.T]struct{}{}, - nodes: map[util.T]struct{}{}, - sorted: nil, - } - - // Create visitor to walk a rule AST and add edges to the rule graph for - // each dependency. - vis := func(a *Rule) *GenericVisitor { - stop := false - return NewGenericVisitor(func(x interface{}) bool { - switch x := x.(type) { - case Ref: - for _, b := range list(x) { - for node := b; node != nil; node = node.Else { - graph.addDependency(a, node) - } - } - case *Rule: - if stop { - // Do not recurse into else clauses (which will be handled - // by the outer visitor.) - return true - } - stop = true - } - return false - }) - } - - // Walk over all rules, add them to graph, and build adjacency lists. - for _, module := range modules { - WalkRules(module, func(a *Rule) bool { - graph.addNode(a) - vis(a).Walk(a) - return false - }) - } - - return graph -} - -// Dependencies returns the set of rules that x depends on. -func (g *Graph) Dependencies(x util.T) map[util.T]struct{} { - return g.adj[x] -} - -// Dependents returns the set of rules that depend on x. -func (g *Graph) Dependents(x util.T) map[util.T]struct{} { - return g.radj[x] -} - -// Sort returns a slice of rules sorted by dependencies. If a cycle is found, -// ok is set to false. -func (g *Graph) Sort() (sorted []util.T, ok bool) { - if g.sorted != nil { - return g.sorted, true - } - - sorter := &graphSort{ - sorted: make([]util.T, 0, len(g.nodes)), - deps: g.Dependencies, - marked: map[util.T]struct{}{}, - temp: map[util.T]struct{}{}, - } - - for node := range g.nodes { - if !sorter.Visit(node) { - return nil, false - } - } - - g.sorted = sorter.sorted - return g.sorted, true -} - -func (g *Graph) addDependency(u util.T, v util.T) { - - if _, ok := g.nodes[u]; !ok { - g.addNode(u) - } - - if _, ok := g.nodes[v]; !ok { - g.addNode(v) - } - - edges, ok := g.adj[u] - if !ok { - edges = map[util.T]struct{}{} - g.adj[u] = edges - } - - edges[v] = struct{}{} - - edges, ok = g.radj[v] - if !ok { - edges = map[util.T]struct{}{} - g.radj[v] = edges - } - - edges[u] = struct{}{} -} - -func (g *Graph) addNode(n util.T) { - g.nodes[n] = struct{}{} -} - -type graphSort struct { - sorted []util.T - deps func(util.T) map[util.T]struct{} - marked map[util.T]struct{} - temp map[util.T]struct{} -} - -func (sort *graphSort) Marked(node util.T) bool { - _, marked := sort.marked[node] - return marked -} - -func (sort *graphSort) Visit(node util.T) (ok bool) { - if _, ok := sort.temp[node]; ok { - return false - } - if sort.Marked(node) { - return true - } - sort.temp[node] = struct{}{} - for other := range sort.deps(node) { - if !sort.Visit(other) { - return false - } - } - sort.marked[node] = struct{}{} - delete(sort.temp, node) - sort.sorted = append(sort.sorted, node) - return true -} - -// GraphTraversal is a Traversal that understands the dependency graph -type GraphTraversal struct { - graph *Graph - visited map[util.T]struct{} -} - -// NewGraphTraversal returns a Traversal for the dependency graph -func NewGraphTraversal(graph *Graph) *GraphTraversal { - return &GraphTraversal{ - graph: graph, - visited: map[util.T]struct{}{}, - } -} - -// 
Edges lists all dependency connections for a given node -func (g *GraphTraversal) Edges(x util.T) []util.T { - r := []util.T{} - for v := range g.graph.Dependencies(x) { - r = append(r, v) - } - return r -} - -// Visited returns whether a node has been visited, setting a node to visited if not -func (g *GraphTraversal) Visited(u util.T) bool { - _, ok := g.visited[u] - g.visited[u] = struct{}{} - return ok -} - -type unsafePair struct { - Expr *Expr - Vars VarSet -} - -type unsafeVarLoc struct { - Var Var - Loc *Location -} - -type unsafeVars map[*Expr]VarSet - -func (vs unsafeVars) Add(e *Expr, v Var) { - if u, ok := vs[e]; ok { - u[v] = struct{}{} - } else { - vs[e] = VarSet{v: struct{}{}} - } -} - -func (vs unsafeVars) Set(e *Expr, s VarSet) { - vs[e] = s -} - -func (vs unsafeVars) Update(o unsafeVars) { - for k, v := range o { - if _, ok := vs[k]; !ok { - vs[k] = VarSet{} - } - vs[k].Update(v) - } -} - -func (vs unsafeVars) Vars() (result []unsafeVarLoc) { - - locs := map[Var]*Location{} - - // If var appears in multiple sets then pick first by location. - for expr, vars := range vs { - for v := range vars { - if locs[v].Compare(expr.Location) > 0 { - locs[v] = expr.Location - } - } - } - - for v, loc := range locs { - result = append(result, unsafeVarLoc{ - Var: v, - Loc: loc, - }) - } - - sort.Slice(result, func(i, j int) bool { - return result[i].Loc.Compare(result[j].Loc) < 0 - }) - - return result -} - -func (vs unsafeVars) Slice() (result []unsafePair) { - for expr, vs := range vs { - result = append(result, unsafePair{ - Expr: expr, - Vars: vs, - }) - } - return -} - -// reorderBodyForSafety returns a copy of the body ordered such that -// left to right evaluation of the body will not encounter unbound variables -// in input positions or negated expressions. -// -// Expressions are added to the re-ordered body as soon as they are considered -// safe. If multiple expressions become safe in the same pass, they are added -// in their original order. This results in minimal re-ordering of the body. -// -// If the body cannot be reordered to ensure safety, the second return value -// contains a mapping of expressions to unsafe variables in those expressions. -func reorderBodyForSafety(builtins map[string]*Builtin, arity func(Ref) int, globals VarSet, body Body) (Body, unsafeVars) { - - bodyVars := body.Vars(SafetyCheckVisitorParams) - reordered := make(Body, 0, len(body)) - safe := VarSet{} - unsafe := unsafeVars{} - - for _, e := range body { - for v := range e.Vars(SafetyCheckVisitorParams) { - if globals.Contains(v) { - safe.Add(v) - } else { - unsafe.Add(e, v) - } - } - } - - for { - n := len(reordered) - - for _, e := range body { - if reordered.Contains(e) { - continue - } - - ovs := outputVarsForExpr(e, arity, safe) - - // check closures: is this expression closing over variables that - // haven't been made safe by what's already included in `reordered`? 
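- // For example, ys = [y | y = xs[_]] cannot be scheduled until xs has been - // made safe by an expression already in reordered.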
- vs := unsafeVarsInClosures(e) - cv := vs.Intersect(bodyVars).Diff(globals) - uv := cv.Diff(outputVarsForBody(reordered, arity, safe)) - - if len(uv) > 0 { - if uv.Equal(ovs) { // special case "closure-self" - continue - } - unsafe.Set(e, uv) - } - - for v := range unsafe[e] { - if ovs.Contains(v) || safe.Contains(v) { - delete(unsafe[e], v) - } - } - - if len(unsafe[e]) == 0 { - delete(unsafe, e) - reordered.Append(e) - safe.Update(ovs) // this expression's outputs are safe - } - } - - if len(reordered) == n { // fixed point, could not add any expr of body - break - } - } - - // Recursively visit closures and perform the safety checks on them. - // Update the globals at each expression to include the variables that could - // be closed over. - g := globals.Copy() - for i, e := range reordered { - if i > 0 { - g.Update(reordered[i-1].Vars(SafetyCheckVisitorParams)) - } - xform := &bodySafetyTransformer{ - builtins: builtins, - arity: arity, - current: e, - globals: g, - unsafe: unsafe, - } - NewGenericVisitor(xform.Visit).Walk(e) - } - - return reordered, unsafe -} - -type bodySafetyTransformer struct { - builtins map[string]*Builtin - arity func(Ref) int - current *Expr - globals VarSet - unsafe unsafeVars -} - -func (xform *bodySafetyTransformer) Visit(x interface{}) bool { - switch term := x.(type) { - case *Term: - switch x := term.Value.(type) { - case *object: - cpy, _ := x.Map(func(k, v *Term) (*Term, *Term, error) { - kcpy := k.Copy() - NewGenericVisitor(xform.Visit).Walk(kcpy) - vcpy := v.Copy() - NewGenericVisitor(xform.Visit).Walk(vcpy) - return kcpy, vcpy, nil - }) - term.Value = cpy - return true - case *set: - cpy, _ := x.Map(func(v *Term) (*Term, error) { - vcpy := v.Copy() - NewGenericVisitor(xform.Visit).Walk(vcpy) - return vcpy, nil - }) - term.Value = cpy - return true - case *ArrayComprehension: - xform.reorderArrayComprehensionSafety(x) - return true - case *ObjectComprehension: - xform.reorderObjectComprehensionSafety(x) - return true - case *SetComprehension: - xform.reorderSetComprehensionSafety(x) - return true - } - case *Expr: - if ev, ok := term.Terms.(*Every); ok { - xform.globals.Update(ev.KeyValueVars()) - ev.Body = xform.reorderComprehensionSafety(NewVarSet(), ev.Body) - return true - } - } - return false -} - -func (xform *bodySafetyTransformer) reorderComprehensionSafety(tv VarSet, body Body) Body { - bv := body.Vars(SafetyCheckVisitorParams) - bv.Update(xform.globals) - uv := tv.Diff(bv) - for v := range uv { - xform.unsafe.Add(xform.current, v) - } - - r, u := reorderBodyForSafety(xform.builtins, xform.arity, xform.globals, body) - if len(u) == 0 { - return r - } - - xform.unsafe.Update(u) - return body -} - -func (xform *bodySafetyTransformer) reorderArrayComprehensionSafety(ac *ArrayComprehension) { - ac.Body = xform.reorderComprehensionSafety(ac.Term.Vars(), ac.Body) -} - -func (xform *bodySafetyTransformer) reorderObjectComprehensionSafety(oc *ObjectComprehension) { - tv := oc.Key.Vars() - tv.Update(oc.Value.Vars()) - oc.Body = xform.reorderComprehensionSafety(tv, oc.Body) -} - -func (xform *bodySafetyTransformer) reorderSetComprehensionSafety(sc *SetComprehension) { - sc.Body = xform.reorderComprehensionSafety(sc.Term.Vars(), sc.Body) -} - -// unsafeVarsInClosures collects vars that are contained in closures within -// this expression. 
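-// For example, for ys = [y | y = xs[_]] it collects the vars appearing inside -// the comprehension (y, xs, and any generated wildcards); callers then narrow -// these down to the ones that are actually unsafe.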
-func unsafeVarsInClosures(e *Expr) VarSet { - vs := VarSet{} - WalkClosures(e, func(x interface{}) bool { - vis := &VarVisitor{vars: vs} - if ev, ok := x.(*Every); ok { - vis.Walk(ev.Body) - return true - } - vis.Walk(x) - return true - }) - return vs -} - -// OutputVarsFromBody returns all variables which are the "output" for -// the given body. For safety checks this means that they would be -// made safe by the body. -func OutputVarsFromBody(c *Compiler, body Body, safe VarSet) VarSet { - return outputVarsForBody(body, c.GetArity, safe) -} - -func outputVarsForBody(body Body, arity func(Ref) int, safe VarSet) VarSet { - o := safe.Copy() - for _, e := range body { - o.Update(outputVarsForExpr(e, arity, o)) - } - return o.Diff(safe) -} - -// OutputVarsFromExpr returns all variables which are the "output" for -// the given expression. For safety checks this means that they would be -// made safe by the expr. -func OutputVarsFromExpr(c *Compiler, expr *Expr, safe VarSet) VarSet { - return outputVarsForExpr(expr, c.GetArity, safe) -} - -func outputVarsForExpr(expr *Expr, arity func(Ref) int, safe VarSet) VarSet { - - // Negated expressions must be safe. - if expr.Negated { - return VarSet{} - } - - // With modifier inputs must be safe. - for _, with := range expr.With { - vis := NewVarVisitor().WithParams(SafetyCheckVisitorParams) - vis.Walk(with) - vars := vis.Vars() - unsafe := vars.Diff(safe) - if len(unsafe) > 0 { - return VarSet{} - } - } - - switch terms := expr.Terms.(type) { - case *Term: - return outputVarsForTerms(expr, safe) - case []*Term: - if expr.IsEquality() { - return outputVarsForExprEq(expr, safe) - } - - operator, ok := terms[0].Value.(Ref) - if !ok { - return VarSet{} - } - - ar := arity(operator) - if ar < 0 { - return VarSet{} - } - - return outputVarsForExprCall(expr, ar, safe, terms) - case *Every: - return outputVarsForTerms(terms.Domain, safe) - default: - panic("illegal expression") - } -} - -func outputVarsForExprEq(expr *Expr, safe VarSet) VarSet { - - if !validEqAssignArgCount(expr) { - return safe - } - - output := outputVarsForTerms(expr, safe) - output.Update(safe) - output.Update(Unify(output, expr.Operand(0), expr.Operand(1))) - - return output.Diff(safe) -} - -func outputVarsForExprCall(expr *Expr, arity int, safe VarSet, terms []*Term) VarSet { - - output := outputVarsForTerms(expr, safe) - - numInputTerms := arity + 1 - if numInputTerms >= len(terms) { - return output - } - - params := VarVisitorParams{ - SkipClosures: true, - SkipSets: true, - SkipObjectKeys: true, - SkipRefHead: true, - } - vis := NewVarVisitor().WithParams(params) - vis.Walk(Args(terms[:numInputTerms])) - unsafe := vis.Vars().Diff(output).Diff(safe) - - if len(unsafe) > 0 { - return VarSet{} - } - - vis = NewVarVisitor().WithParams(params) - vis.Walk(Args(terms[numInputTerms:])) - output.Update(vis.vars) - return output -} - -func outputVarsForTerms(expr interface{}, safe VarSet) VarSet { - output := VarSet{} - WalkTerms(expr, func(x *Term) bool { - switch r := x.Value.(type) { - case *SetComprehension, *ArrayComprehension, *ObjectComprehension: - return true - case Ref: - if !isRefSafe(r, safe) { - return true - } - output.Update(r.OutputVars()) - return false - } - return false - }) - return output -} - -type equalityFactory struct { - gen *localVarGenerator -} - -func newEqualityFactory(gen *localVarGenerator) *equalityFactory { - return &equalityFactory{gen} -} - -func (f *equalityFactory) Generate(other *Term) *Expr { - term := 
NewTerm(f.gen.Generate()).SetLocation(other.Location) - expr := Equality.Expr(term, other) - expr.Generated = true - expr.Location = other.Location - return expr -} - -type localVarGenerator struct { - exclude VarSet - suffix string - next int -} - -func newLocalVarGeneratorForModuleSet(sorted []string, modules map[string]*Module) *localVarGenerator { - exclude := NewVarSet() - vis := &VarVisitor{vars: exclude} - for _, key := range sorted { - vis.Walk(modules[key]) - } - return &localVarGenerator{exclude: exclude, next: 0} -} - -func newLocalVarGenerator(suffix string, node interface{}) *localVarGenerator { - exclude := NewVarSet() - vis := &VarVisitor{vars: exclude} - vis.Walk(node) - return &localVarGenerator{exclude: exclude, suffix: suffix, next: 0} -} - -func (l *localVarGenerator) Generate() Var { - for { - result := Var("__local" + l.suffix + strconv.Itoa(l.next) + "__") - l.next++ - if !l.exclude.Contains(result) { - return result - } - } -} - -func getGlobals(pkg *Package, rules []Ref, imports []*Import) map[Var]*usedRef { - - globals := make(map[Var]*usedRef, len(rules)) // NB: might grow bigger with imports - - // Populate globals with exports within the package. - for _, ref := range rules { - v := ref[0].Value.(Var) - globals[v] = &usedRef{ref: pkg.Path.Append(StringTerm(string(v)))} - } - - // Populate globals with imports. - for _, imp := range imports { - path := imp.Path.Value.(Ref) - if FutureRootDocument.Equal(path[0]) || RegoRootDocument.Equal(path[0]) { - continue // ignore future and rego imports - } - globals[imp.Name()] = &usedRef{ref: path} - } - - return globals -} - -func requiresEval(x *Term) bool { - if x == nil { - return false - } - return ContainsRefs(x) || ContainsComprehensions(x) -} - -func resolveRef(globals map[Var]*usedRef, ignore *declaredVarStack, ref Ref) Ref { - - r := Ref{} - for i, x := range ref { - switch v := x.Value.(type) { - case Var: - if g, ok := globals[v]; ok && !ignore.Contains(v) { - cpy := g.ref.Copy() - for i := range cpy { - cpy[i].SetLocation(x.Location) - } - if i == 0 { - r = cpy - } else { - r = append(r, NewTerm(cpy).SetLocation(x.Location)) - } - g.used = true - } else { - r = append(r, x) - } - case Ref, *Array, Object, Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call: - r = append(r, resolveRefsInTerm(globals, ignore, x)) - default: - r = append(r, x) - } - } - - return r -} - -type usedRef struct { - ref Ref - used bool -} - -func resolveRefsInRule(globals map[Var]*usedRef, rule *Rule) error { - ignore := &declaredVarStack{} - - vars := NewVarSet() - var vis *GenericVisitor - var err error - - // Walk args to collect vars and transform body so that callers can shadow - // root documents. - vis = NewGenericVisitor(func(x interface{}) bool { - if err != nil { - return true - } - switch x := x.(type) { - case Var: - vars.Add(x) - - // Object keys cannot be pattern matched so only walk values. - case *object: - x.Foreach(func(_, v *Term) { - vis.Walk(v) - }) - - // Skip terms that could contain vars that cannot be pattern matched. - case Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call: - return true - - case *Term: - if _, ok := x.Value.(Ref); ok { - if RootDocumentRefs.Contains(x) { - // We could support args named input, data, etc. however - // this would require rewriting terms in the head and body. - // Preventing root document shadowing is simpler, and - // arguably, will prevent confusing names from being used. 
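- // For example, f(input) := 1 is rejected with an error like - // "args must not shadow input (use a different variable name)".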
- // NOTE: this check is also performed as part of strict-mode in - // checkRootDocumentOverrides. - err = fmt.Errorf("args must not shadow %v (use a different variable name)", x) - return true - } - } - } - return false - }) - - vis.Walk(rule.Head.Args) - - if err != nil { - return err - } - - ignore.Push(vars) - ignore.Push(declaredVars(rule.Body)) - - ref := rule.Head.Ref() - for i := 1; i < len(ref); i++ { - ref[i] = resolveRefsInTerm(globals, ignore, ref[i]) - } - if rule.Head.Key != nil { - rule.Head.Key = resolveRefsInTerm(globals, ignore, rule.Head.Key) - } - - if rule.Head.Value != nil { - rule.Head.Value = resolveRefsInTerm(globals, ignore, rule.Head.Value) - } - - rule.Body = resolveRefsInBody(globals, ignore, rule.Body) - return nil -} - -func resolveRefsInBody(globals map[Var]*usedRef, ignore *declaredVarStack, body Body) Body { - r := make([]*Expr, 0, len(body)) - for _, expr := range body { - r = append(r, resolveRefsInExpr(globals, ignore, expr)) - } - return r -} - -func resolveRefsInExpr(globals map[Var]*usedRef, ignore *declaredVarStack, expr *Expr) *Expr { - cpy := *expr - switch ts := expr.Terms.(type) { - case *Term: - cpy.Terms = resolveRefsInTerm(globals, ignore, ts) - case []*Term: - buf := make([]*Term, len(ts)) - for i := 0; i < len(ts); i++ { - buf[i] = resolveRefsInTerm(globals, ignore, ts[i]) - } - cpy.Terms = buf - case *SomeDecl: - if val, ok := ts.Symbols[0].Value.(Call); ok { - cpy.Terms = &SomeDecl{Symbols: []*Term{CallTerm(resolveRefsInTermSlice(globals, ignore, val)...)}} - } - case *Every: - locals := NewVarSet() - if ts.Key != nil { - locals.Update(ts.Key.Vars()) - } - locals.Update(ts.Value.Vars()) - ignore.Push(locals) - cpy.Terms = &Every{ - Key: ts.Key.Copy(), // TODO(sr): do more? - Value: ts.Value.Copy(), // TODO(sr): do more? - Domain: resolveRefsInTerm(globals, ignore, ts.Domain), - Body: resolveRefsInBody(globals, ignore, ts.Body), - } - ignore.Pop() - } - for _, w := range cpy.With { - w.Target = resolveRefsInTerm(globals, ignore, w.Target) - w.Value = resolveRefsInTerm(globals, ignore, w.Value) - } - return &cpy -} - -func resolveRefsInTerm(globals map[Var]*usedRef, ignore *declaredVarStack, term *Term) *Term { - switch v := term.Value.(type) { - case Var: - if g, ok := globals[v]; ok && !ignore.Contains(v) { - cpy := g.ref.Copy() - for i := range cpy { - cpy[i].SetLocation(term.Location) - } - g.used = true - return NewTerm(cpy).SetLocation(term.Location) - } - return term - case Ref: - fqn := resolveRef(globals, ignore, v) - cpy := *term - cpy.Value = fqn - return &cpy - case *object: - cpy := *term - cpy.Value, _ = v.Map(func(k, v *Term) (*Term, *Term, error) { - k = resolveRefsInTerm(globals, ignore, k) - v = resolveRefsInTerm(globals, ignore, v) - return k, v, nil - }) - return &cpy - case *Array: - cpy := *term - cpy.Value = NewArray(resolveRefsInTermArray(globals, ignore, v)...) 
- return &cpy - case Call: - cpy := *term - cpy.Value = Call(resolveRefsInTermSlice(globals, ignore, v)) - return &cpy - case Set: - s, _ := v.Map(func(e *Term) (*Term, error) { - return resolveRefsInTerm(globals, ignore, e), nil - }) - cpy := *term - cpy.Value = s - return &cpy - case *ArrayComprehension: - ac := &ArrayComprehension{} - ignore.Push(declaredVars(v.Body)) - ac.Term = resolveRefsInTerm(globals, ignore, v.Term) - ac.Body = resolveRefsInBody(globals, ignore, v.Body) - cpy := *term - cpy.Value = ac - ignore.Pop() - return &cpy - case *ObjectComprehension: - oc := &ObjectComprehension{} - ignore.Push(declaredVars(v.Body)) - oc.Key = resolveRefsInTerm(globals, ignore, v.Key) - oc.Value = resolveRefsInTerm(globals, ignore, v.Value) - oc.Body = resolveRefsInBody(globals, ignore, v.Body) - cpy := *term - cpy.Value = oc - ignore.Pop() - return &cpy - case *SetComprehension: - sc := &SetComprehension{} - ignore.Push(declaredVars(v.Body)) - sc.Term = resolveRefsInTerm(globals, ignore, v.Term) - sc.Body = resolveRefsInBody(globals, ignore, v.Body) - cpy := *term - cpy.Value = sc - ignore.Pop() - return &cpy - default: - return term - } -} - -func resolveRefsInTermArray(globals map[Var]*usedRef, ignore *declaredVarStack, terms *Array) []*Term { - cpy := make([]*Term, terms.Len()) - for i := 0; i < terms.Len(); i++ { - cpy[i] = resolveRefsInTerm(globals, ignore, terms.Elem(i)) - } - return cpy -} - -func resolveRefsInTermSlice(globals map[Var]*usedRef, ignore *declaredVarStack, terms []*Term) []*Term { - cpy := make([]*Term, len(terms)) - for i := 0; i < len(terms); i++ { - cpy[i] = resolveRefsInTerm(globals, ignore, terms[i]) - } - return cpy -} - -type declaredVarStack []VarSet - -func (s declaredVarStack) Contains(v Var) bool { - for i := len(s) - 1; i >= 0; i-- { - if _, ok := s[i][v]; ok { - return ok - } - } - return false -} - -func (s declaredVarStack) Add(v Var) { - s[len(s)-1].Add(v) -} - -func (s *declaredVarStack) Push(vs VarSet) { - *s = append(*s, vs) -} - -func (s *declaredVarStack) Pop() { - curr := *s - *s = curr[:len(curr)-1] -} - -func declaredVars(x interface{}) VarSet { - vars := NewVarSet() - vis := NewGenericVisitor(func(x interface{}) bool { - switch x := x.(type) { - case *Expr: - if x.IsAssignment() && validEqAssignArgCount(x) { - WalkVars(x.Operand(0), func(v Var) bool { - vars.Add(v) - return false - }) - } else if decl, ok := x.Terms.(*SomeDecl); ok { - for i := range decl.Symbols { - switch val := decl.Symbols[i].Value.(type) { - case Var: - vars.Add(val) - case Call: - args := val[1:] - if len(args) == 3 { // some x, y in xs - WalkVars(args[1], func(v Var) bool { - vars.Add(v) - return false - }) - } - // some x in xs - WalkVars(args[0], func(v Var) bool { - vars.Add(v) - return false - }) - } - } - } - case *ArrayComprehension, *SetComprehension, *ObjectComprehension: - return true - } - return false - }) - vis.Walk(x) - return vars -} - -// rewriteComprehensionTerms will rewrite comprehensions so that the term part -// is bound to a variable in the body. This allows any type of term to be used -// in the term part (even if the term requires evaluation.) 
-// -// For instance, given the following comprehension: -// -// [x[0] | x = y[_]; y = [1,2,3]] -// -// The comprehension would be rewritten as: -// -// [__local0__ | x = y[_]; y = [1,2,3]; __local0__ = x[0]] -func rewriteComprehensionTerms(f *equalityFactory, node interface{}) (interface{}, error) { - return TransformComprehensions(node, func(x interface{}) (Value, error) { - switch x := x.(type) { - case *ArrayComprehension: - if requiresEval(x.Term) { - expr := f.Generate(x.Term) - x.Term = expr.Operand(0) - x.Body.Append(expr) - } - return x, nil - case *SetComprehension: - if requiresEval(x.Term) { - expr := f.Generate(x.Term) - x.Term = expr.Operand(0) - x.Body.Append(expr) - } - return x, nil - case *ObjectComprehension: - if requiresEval(x.Key) { - expr := f.Generate(x.Key) - x.Key = expr.Operand(0) - x.Body.Append(expr) - } - if requiresEval(x.Value) { - expr := f.Generate(x.Value) - x.Value = expr.Operand(0) - x.Body.Append(expr) - } - return x, nil - } - panic("illegal type") - }) -} - -// rewriteEquals will rewrite exprs under x as unification calls instead of == -// calls. For example: -// -// data.foo == data.bar is rewritten as data.foo = data.bar -// -// This stage should only run after the safety check (since == is a built-in with no -// outputs, so the inputs must not be marked as safe.) -// -// This stage is not executed by the query compiler by default because when -// callers specify == instead of = they expect to receive a true/false/undefined -// result back whereas with = the result is only ever true/undefined. For -// partial evaluation cases we do want to rewrite == to = to simplify the -// result. -func rewriteEquals(x interface{}) (modified bool) { - doubleEq := Equal.Ref() - unifyOp := Equality.Ref() - t := NewGenericTransformer(func(x interface{}) (interface{}, error) { - if x, ok := x.(*Expr); ok && x.IsCall() { - operator := x.Operator() - if operator.Equal(doubleEq) && len(x.Operands()) == 2 { - modified = true - x.SetOperator(NewTerm(unifyOp)) - } - } - return x, nil - }) - _, _ = Transform(t, x) // ignore error - return modified -} - -func rewriteTestEqualities(f *equalityFactory, body Body) Body { - result := make(Body, 0, len(body)) - for _, expr := range body { - // We can't rewrite negated expressions; if the extracted term is undefined, evaluation would fail before - // reaching the negation check. - if !expr.Negated && !expr.Generated { - switch { - case expr.IsEquality(): - terms := expr.Terms.([]*Term) - result, terms[1] = rewriteDynamicsShallow(expr, f, terms[1], result) - result, terms[2] = rewriteDynamicsShallow(expr, f, terms[2], result) - case expr.IsEvery(): - // We rewrite equalities inside of every-bodies because a failure there is what - // causes the test rule to fail. - // Failures inside other expressions with closures, such as comprehensions, won't cause the test-rule to fail, so we skip those.
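- // For example, in every x in xs { x.y = expected } the ref operand x.y is - // pulled out into a generated equality so a failing test can report its value.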
- every := expr.Terms.(*Every) - every.Body = rewriteTestEqualities(f, every.Body) - } - } - result = appendExpr(result, expr) - } - return result -} - -func rewriteDynamicsShallow(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) { - switch term.Value.(type) { - case Ref, *ArrayComprehension, *SetComprehension, *ObjectComprehension: - generated := f.Generate(term) - generated.With = original.With - result.Append(generated) - connectGeneratedExprs(original, generated) - return result, result[len(result)-1].Operand(0) - } - return result, term -} - -// rewriteDynamics will rewrite the body so that dynamic terms (i.e., refs and -// comprehensions) are bound to vars earlier in the query. This translation -// results in eager evaluation. -// -// For instance, given the following query: -// -// foo(data.bar) = 1 -// -// The rewritten version will be: -// -// __local0__ = data.bar; foo(__local0__) = 1 -func rewriteDynamics(f *equalityFactory, body Body) Body { - result := make(Body, 0, len(body)) - for _, expr := range body { - switch { - case expr.IsEquality(): - result = rewriteDynamicsEqExpr(f, expr, result) - case expr.IsCall(): - result = rewriteDynamicsCallExpr(f, expr, result) - case expr.IsEvery(): - result = rewriteDynamicsEveryExpr(f, expr, result) - default: - result = rewriteDynamicsTermExpr(f, expr, result) - } - } - return result -} - -func appendExpr(body Body, expr *Expr) Body { - body.Append(expr) - return body -} - -func rewriteDynamicsEqExpr(f *equalityFactory, expr *Expr, result Body) Body { - if !validEqAssignArgCount(expr) { - return appendExpr(result, expr) - } - terms := expr.Terms.([]*Term) - result, terms[1] = rewriteDynamicsInTerm(expr, f, terms[1], result) - result, terms[2] = rewriteDynamicsInTerm(expr, f, terms[2], result) - return appendExpr(result, expr) -} - -func rewriteDynamicsCallExpr(f *equalityFactory, expr *Expr, result Body) Body { - terms := expr.Terms.([]*Term) - for i := 1; i < len(terms); i++ { - result, terms[i] = rewriteDynamicsOne(expr, f, terms[i], result) - } - return appendExpr(result, expr) -} - -func rewriteDynamicsEveryExpr(f *equalityFactory, expr *Expr, result Body) Body { - ev := expr.Terms.(*Every) - result, ev.Domain = rewriteDynamicsOne(expr, f, ev.Domain, result) - ev.Body = rewriteDynamics(f, ev.Body) - return appendExpr(result, expr) -} - -func rewriteDynamicsTermExpr(f *equalityFactory, expr *Expr, result Body) Body { - term := expr.Terms.(*Term) - result, expr.Terms = rewriteDynamicsInTerm(expr, f, term, result) - return appendExpr(result, expr) -} - -func rewriteDynamicsInTerm(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) { - switch v := term.Value.(type) { - case Ref: - for i := 1; i < len(v); i++ { - result, v[i] = rewriteDynamicsOne(original, f, v[i], result) - } - case *ArrayComprehension: - v.Body = rewriteDynamics(f, v.Body) - case *SetComprehension: - v.Body = rewriteDynamics(f, v.Body) - case *ObjectComprehension: - v.Body = rewriteDynamics(f, v.Body) - default: - result, term = rewriteDynamicsOne(original, f, term, result) - } - return result, term -} - -func rewriteDynamicsOne(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) { - switch v := term.Value.(type) { - case Ref: - for i := 1; i < len(v); i++ { - result, v[i] = rewriteDynamicsOne(original, f, v[i], result) - } - generated := f.Generate(term) - generated.With = original.With - result.Append(generated) - connectGeneratedExprs(original, generated) - return result, 
result[len(result)-1].Operand(0) - case *Array: - for i := 0; i < v.Len(); i++ { - var t *Term - result, t = rewriteDynamicsOne(original, f, v.Elem(i), result) - v.set(i, t) - } - return result, term - case *object: - cpy := NewObject() - v.Foreach(func(key, value *Term) { - result, key = rewriteDynamicsOne(original, f, key, result) - result, value = rewriteDynamicsOne(original, f, value, result) - cpy.Insert(key, value) - }) - return result, NewTerm(cpy).SetLocation(term.Location) - case Set: - cpy := NewSet() - for _, term := range v.Slice() { - var rw *Term - result, rw = rewriteDynamicsOne(original, f, term, result) - cpy.Add(rw) - } - return result, NewTerm(cpy).SetLocation(term.Location) - case *ArrayComprehension: - var extra *Expr - v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term) - result.Append(extra) - connectGeneratedExprs(original, extra) - return result, result[len(result)-1].Operand(0) - case *SetComprehension: - var extra *Expr - v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term) - result.Append(extra) - connectGeneratedExprs(original, extra) - return result, result[len(result)-1].Operand(0) - case *ObjectComprehension: - var extra *Expr - v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term) - result.Append(extra) - connectGeneratedExprs(original, extra) - return result, result[len(result)-1].Operand(0) - } - return result, term -} - -func rewriteDynamicsComprehensionBody(original *Expr, f *equalityFactory, body Body, term *Term) (Body, *Expr) { - body = rewriteDynamics(f, body) - generated := f.Generate(term) - generated.With = original.With - return body, generated -} - -func rewriteExprTermsInHead(gen *localVarGenerator, rule *Rule) { - for i := range rule.Head.Args { - support, output := expandExprTerm(gen, rule.Head.Args[i]) - for j := range support { - rule.Body.Append(support[j]) - } - rule.Head.Args[i] = output - } - if rule.Head.Key != nil { - support, output := expandExprTerm(gen, rule.Head.Key) - for i := range support { - rule.Body.Append(support[i]) - } - rule.Head.Key = output - } - if rule.Head.Value != nil { - support, output := expandExprTerm(gen, rule.Head.Value) - for i := range support { - rule.Body.Append(support[i]) - } - rule.Head.Value = output - } -} - -func rewriteExprTermsInBody(gen *localVarGenerator, body Body) Body { - cpy := make(Body, 0, len(body)) - for i := 0; i < len(body); i++ { - for _, expr := range expandExpr(gen, body[i]) { - cpy.Append(expr) - } - } - return cpy -} - -func expandExpr(gen *localVarGenerator, expr *Expr) (result []*Expr) { - for i := range expr.With { - extras, value := expandExprTerm(gen, expr.With[i].Value) - expr.With[i].Value = value - result = append(result, extras...) - } - switch terms := expr.Terms.(type) { - case *Term: - extras, term := expandExprTerm(gen, terms) - if len(expr.With) > 0 { - for i := range extras { - extras[i].With = expr.With - } - } - result = append(result, extras...) - expr.Terms = term - result = append(result, expr) - case []*Term: - for i := 1; i < len(terms); i++ { - var extras []*Expr - extras, terms[i] = expandExprTerm(gen, terms[i]) - connectGeneratedExprs(expr, extras...) - if len(expr.With) > 0 { - for i := range extras { - extras[i].With = expr.With - } - } - result = append(result, extras...) 
- } - result = append(result, expr) - case *Every: - var extras []*Expr - - term := NewTerm(gen.Generate()).SetLocation(terms.Domain.Location) - eq := Equality.Expr(term, terms.Domain).SetLocation(terms.Domain.Location) - eq.Generated = true - eq.With = expr.With - extras = expandExpr(gen, eq) - terms.Domain = term - - terms.Body = rewriteExprTermsInBody(gen, terms.Body) - result = append(result, extras...) - result = append(result, expr) - } - return -} - -func connectGeneratedExprs(parent *Expr, children ...*Expr) { - for _, child := range children { - child.generatedFrom = parent - parent.generates = append(parent.generates, child) - } -} - -func expandExprTerm(gen *localVarGenerator, term *Term) (support []*Expr, output *Term) { - output = term - switch v := term.Value.(type) { - case Call: - for i := 1; i < len(v); i++ { - var extras []*Expr - extras, v[i] = expandExprTerm(gen, v[i]) - support = append(support, extras...) - } - output = NewTerm(gen.Generate()).SetLocation(term.Location) - expr := v.MakeExpr(output).SetLocation(term.Location) - expr.Generated = true - support = append(support, expr) - case Ref: - support = expandExprRef(gen, v) - case *Array: - support = expandExprTermArray(gen, v) - case *object: - cpy, _ := v.Map(func(k, v *Term) (*Term, *Term, error) { - extras1, expandedKey := expandExprTerm(gen, k) - extras2, expandedValue := expandExprTerm(gen, v) - support = append(support, extras1...) - support = append(support, extras2...) - return expandedKey, expandedValue, nil - }) - output = NewTerm(cpy).SetLocation(term.Location) - case Set: - cpy, _ := v.Map(func(x *Term) (*Term, error) { - extras, expanded := expandExprTerm(gen, x) - support = append(support, extras...) - return expanded, nil - }) - output = NewTerm(cpy).SetLocation(term.Location) - case *ArrayComprehension: - support, term := expandExprTerm(gen, v.Term) - for i := range support { - v.Body.Append(support[i]) - } - v.Term = term - v.Body = rewriteExprTermsInBody(gen, v.Body) - case *SetComprehension: - support, term := expandExprTerm(gen, v.Term) - for i := range support { - v.Body.Append(support[i]) - } - v.Term = term - v.Body = rewriteExprTermsInBody(gen, v.Body) - case *ObjectComprehension: - support, key := expandExprTerm(gen, v.Key) - for i := range support { - v.Body.Append(support[i]) - } - v.Key = key - support, value := expandExprTerm(gen, v.Value) - for i := range support { - v.Body.Append(support[i]) - } - v.Value = value - v.Body = rewriteExprTermsInBody(gen, v.Body) - } - return -} - -func expandExprRef(gen *localVarGenerator, v []*Term) (support []*Expr) { - // Start by calling a normal expandExprTerm on all terms. - support = expandExprTermSlice(gen, v) - - // Rewrite references in order to support indirect references. We rewrite - // e.g. - // - // [1, 2, 3][i] - // - // to - // - // __local_var = [1, 2, 3] - // __local_var[i] - // - // to support these. This only impacts the reference subject, i.e. the - // first item in the slice. - var subject = v[0] - switch subject.Value.(type) { - case *Array, Object, Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call: - f := newEqualityFactory(gen) - assignToLocal := f.Generate(subject) - support = append(support, assignToLocal) - v[0] = assignToLocal.Operand(0) - } - return -} - -func expandExprTermArray(gen *localVarGenerator, arr *Array) (support []*Expr) { - for i := 0; i < arr.Len(); i++ { - extras, v := expandExprTerm(gen, arr.Elem(i)) - arr.set(i, v) - support = append(support, extras...) 
- } - return -} - -func expandExprTermSlice(gen *localVarGenerator, v []*Term) (support []*Expr) { - for i := 0; i < len(v); i++ { - var extras []*Expr - extras, v[i] = expandExprTerm(gen, v[i]) - support = append(support, extras...) - } - return -} - -type localDeclaredVars struct { - vars []*declaredVarSet - - // rewritten contains a mapping of *all* user-defined variables - // that have been rewritten whereas vars contains the state - // from the current query (not any nested queries, and all vars - // seen). - rewritten map[Var]Var - - // indicates if an assignment (:= operator) has been seen *ever* - assignment bool -} - -type varOccurrence int - -const ( - newVar varOccurrence = iota - argVar - seenVar - assignedVar - declaredVar -) - -type declaredVarSet struct { - vs map[Var]Var - reverse map[Var]Var - occurrence map[Var]varOccurrence - count map[Var]int -} - -func newDeclaredVarSet() *declaredVarSet { - return &declaredVarSet{ - vs: map[Var]Var{}, - reverse: map[Var]Var{}, - occurrence: map[Var]varOccurrence{}, - count: map[Var]int{}, - } -} - -func newLocalDeclaredVars() *localDeclaredVars { - return &localDeclaredVars{ - vars: []*declaredVarSet{newDeclaredVarSet()}, - rewritten: map[Var]Var{}, - } -} - -func (s *localDeclaredVars) Copy() *localDeclaredVars { - stack := &localDeclaredVars{ - vars: []*declaredVarSet{}, - rewritten: map[Var]Var{}, - } - - // Copy each scope into the set at the same depth so that scoping is preserved. - for i := range s.vars { - stack.vars = append(stack.vars, newDeclaredVarSet()) - for k, v := range s.vars[i].vs { - stack.vars[i].vs[k] = v - } - for k, v := range s.vars[i].reverse { - stack.vars[i].reverse[k] = v - } - for k, v := range s.vars[i].count { - stack.vars[i].count[k] = v - } - for k, v := range s.vars[i].occurrence { - stack.vars[i].occurrence[k] = v - } - } - - for k, v := range s.rewritten { - stack.rewritten[k] = v - } - - return stack -} - -func (s *localDeclaredVars) Push() { - s.vars = append(s.vars, newDeclaredVarSet()) -} - -func (s *localDeclaredVars) Pop() *declaredVarSet { - sl := s.vars - curr := sl[len(sl)-1] - s.vars = sl[:len(sl)-1] - return curr -} - -func (s localDeclaredVars) Peek() *declaredVarSet { - return s.vars[len(s.vars)-1] -} - -func (s localDeclaredVars) Insert(x, y Var, occurrence varOccurrence) { - elem := s.vars[len(s.vars)-1] - elem.vs[x] = y - elem.reverse[y] = x - elem.occurrence[x] = occurrence - - elem.count[x] = 1 - - // If the variable has been rewritten (where x != y, with y being - // the generated value), store it in the map of rewritten vars. - // Assume that the generated values are unique for the compilation. - if !x.Equal(y) { - s.rewritten[y] = x - } -} - -func (s localDeclaredVars) Declared(x Var) (y Var, ok bool) { - for i := len(s.vars) - 1; i >= 0; i-- { - if y, ok = s.vars[i].vs[x]; ok { - return - } - } - return -} - -// Occurrence returns a flag that indicates whether x has occurred in the -// current scope. -func (s localDeclaredVars) Occurrence(x Var) varOccurrence { - return s.vars[len(s.vars)-1].occurrence[x] -} - -// GlobalOccurrence returns a flag that indicates whether x has occurred in the -// global scope.
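-// For example, a var declared only in an enclosing scope still reports its -// occurrence from there, while a var that was never seen yields (newVar, false).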
-func (s localDeclaredVars) GlobalOccurrence(x Var) (varOccurrence, bool) { - for i := len(s.vars) - 1; i >= 0; i-- { - if occ, ok := s.vars[i].occurrence[x]; ok { - return occ, true - } - } - return newVar, false -} - -// Seen marks x as seen by incrementing its counter -func (s localDeclaredVars) Seen(x Var) { - for i := len(s.vars) - 1; i >= 0; i-- { - dvs := s.vars[i] - if c, ok := dvs.count[x]; ok { - dvs.count[x] = c + 1 - return - } - } - - s.vars[len(s.vars)-1].count[x] = 1 -} - -// Count returns how many times x has been seen -func (s localDeclaredVars) Count(x Var) int { - for i := len(s.vars) - 1; i >= 0; i-- { - if c, ok := s.vars[i].count[x]; ok { - return c - } - } - - return 0 -} - -// rewriteLocalVars rewrites bodies to remove assignment/declaration -// expressions. For example: -// -// a := 1; p[a] -// -// Is rewritten to: -// -// __local0__ = 1; p[__local0__] -// -// During rewriting, assignees are validated to prevent use before declaration. -func rewriteLocalVars(g *localVarGenerator, stack *localDeclaredVars, used VarSet, body Body, strict bool) (Body, map[Var]Var, Errors) { - var errs Errors - body, errs = rewriteDeclaredVarsInBody(g, stack, used, body, errs, strict) - return body, stack.Peek().vs, errs -} - -func rewriteDeclaredVarsInBody(g *localVarGenerator, stack *localDeclaredVars, used VarSet, body Body, errs Errors, strict bool) (Body, Errors) { - - var cpy Body - - for i := range body { - var expr *Expr - switch { - case body[i].IsAssignment(): - stack.assignment = true - expr, errs = rewriteDeclaredAssignment(g, stack, body[i], errs, strict) - case body[i].IsSome(): - expr, errs = rewriteSomeDeclStatement(g, stack, body[i], errs, strict) - case body[i].IsEvery(): - expr, errs = rewriteEveryStatement(g, stack, body[i], errs, strict) - default: - expr, errs = rewriteDeclaredVarsInExpr(g, stack, body[i], errs, strict) - } - if expr != nil { - cpy.Append(expr) - } - } - - // If the body only contained a var statement it will be empty at this - // point. Append true to the body to ensure that it's non-empty (zero length - // bodies are not supported.) - if len(cpy) == 0 { - cpy.Append(NewExpr(BooleanTerm(true))) - } - - errs = checkUnusedAssignedVars(body, stack, used, errs, strict) - return cpy, checkUnusedDeclaredVars(body, stack, used, cpy, errs) -} - -func checkUnusedAssignedVars(body Body, stack *localDeclaredVars, used VarSet, errs Errors, strict bool) Errors { - - if !strict || len(errs) > 0 { - return errs - } - - dvs := stack.Peek() - unused := NewVarSet() - - for v, occ := range dvs.occurrence { - // A var that was assigned in this scope must have been seen (used) more than once (the time of assignment) in - // the same, or nested, scope to be counted as used. 
- if !v.IsWildcard() && stack.Count(v) <= 1 && occ == assignedVar { - unused.Add(dvs.vs[v]) - } - } - - rewrittenUsed := NewVarSet() - for v := range used { - if gv, ok := stack.Declared(v); ok { - rewrittenUsed.Add(gv) - } else { - rewrittenUsed.Add(v) - } - } - - unused = unused.Diff(rewrittenUsed) - - for _, gv := range unused.Sorted() { - found := false - for i := range body { - if body[i].Vars(VarVisitorParams{}).Contains(gv) { - errs = append(errs, NewError(CompileErr, body[i].Loc(), "assigned var %v unused", dvs.reverse[gv])) - found = true - break - } - } - if !found { - errs = append(errs, NewError(CompileErr, body[0].Loc(), "assigned var %v unused", dvs.reverse[gv])) - } - } - - return errs -} - -func checkUnusedDeclaredVars(body Body, stack *localDeclaredVars, used VarSet, cpy Body, errs Errors) Errors { - - // NOTE(tsandall): Do not generate more errors if there are existing - // declaration errors. - if len(errs) > 0 { - return errs - } - - dvs := stack.Peek() - declared := NewVarSet() - - for v, occ := range dvs.occurrence { - if occ == declaredVar { - declared.Add(dvs.vs[v]) - } - } - - bodyvars := cpy.Vars(VarVisitorParams{}) - - for v := range used { - if gv, ok := stack.Declared(v); ok { - bodyvars.Add(gv) - } else { - bodyvars.Add(v) - } - } - - unused := declared.Diff(bodyvars).Diff(used) - - for _, gv := range unused.Sorted() { - rv := dvs.reverse[gv] - if !rv.IsGenerated() { - // Scan through body exprs, looking for a match between the - // bad var's original name, and each expr's declared vars. - foundUnusedVarByName := false - for i := range body { - varsDeclaredInExpr := declaredVars(body[i]) - if varsDeclaredInExpr.Contains(dvs.reverse[gv]) { - // TODO(philipc): Clean up the offset logic here when the parser - // reports more accurate locations. - errs = append(errs, NewError(CompileErr, body[i].Loc(), "declared var %v unused", dvs.reverse[gv])) - foundUnusedVarByName = true - break - } - } - // Default error location returned. 
- if !foundUnusedVarByName { - errs = append(errs, NewError(CompileErr, body[0].Loc(), "declared var %v unused", dvs.reverse[gv])) - } - } - } - - return errs -} - -func rewriteEveryStatement(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) { - e := expr.Copy() - every := e.Terms.(*Every) - - errs = rewriteDeclaredVarsInTermRecursive(g, stack, every.Domain, errs, strict) - - stack.Push() - defer stack.Pop() - - // if the key exists, rewrite - if every.Key != nil { - if v := every.Key.Value.(Var); !v.IsWildcard() { - gv, err := rewriteDeclaredVar(g, stack, v, declaredVar) - if err != nil { - return nil, append(errs, NewError(CompileErr, every.Loc(), err.Error())) - } - every.Key.Value = gv - } - } else { // if the key doesn't exist, add dummy local - every.Key = NewTerm(g.Generate()) - } - - // value is always present - if v := every.Value.Value.(Var); !v.IsWildcard() { - gv, err := rewriteDeclaredVar(g, stack, v, declaredVar) - if err != nil { - return nil, append(errs, NewError(CompileErr, every.Loc(), err.Error())) - } - every.Value.Value = gv - } - - used := NewVarSet() - every.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, every.Body, errs, strict) - - return rewriteDeclaredVarsInExpr(g, stack, e, errs, strict) -} - -func rewriteSomeDeclStatement(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) { - e := expr.Copy() - decl := e.Terms.(*SomeDecl) - for i := range decl.Symbols { - switch v := decl.Symbols[i].Value.(type) { - case Var: - if _, err := rewriteDeclaredVar(g, stack, v, declaredVar); err != nil { - return nil, append(errs, NewError(CompileErr, decl.Loc(), err.Error())) - } - case Call: - var key, val, container *Term - switch len(v) { - case 4: // member3 - key = v[1] - val = v[2] - container = v[3] - case 3: // member - key = NewTerm(g.Generate()) - val = v[1] - container = v[2] - } - - var rhs *Term - switch c := container.Value.(type) { - case Ref: - rhs = RefTerm(append(c, key)...) - default: - rhs = RefTerm(container, key) - } - e.Terms = []*Term{ - RefTerm(VarTerm(Equality.Name)), val, rhs, - } - - for _, v0 := range outputVarsForExprEq(e, container.Vars()).Sorted() { - if _, err := rewriteDeclaredVar(g, stack, v0, declaredVar); err != nil { - return nil, append(errs, NewError(CompileErr, decl.Loc(), err.Error())) - } - } - return rewriteDeclaredVarsInExpr(g, stack, e, errs, strict) - } - } - return nil, errs -} - -func rewriteDeclaredVarsInExpr(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) { - vis := NewGenericVisitor(func(x interface{}) bool { - var stop bool - switch x := x.(type) { - case *Term: - stop, errs = rewriteDeclaredVarsInTerm(g, stack, x, errs, strict) - case *With: - stop, errs = true, rewriteDeclaredVarsInWithRecursive(g, stack, x, errs, strict) - } - return stop - }) - vis.Walk(expr) - return expr, errs -} - -func rewriteDeclaredAssignment(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) { - - if expr.Negated { - errs = append(errs, NewError(CompileErr, expr.Location, "cannot assign vars inside negated expression")) - return expr, errs - } - - numErrsBefore := len(errs) - - if !validEqAssignArgCount(expr) { - return expr, errs - } - - // Rewrite terms on right hand side capture seen vars and recursively - // process comprehensions before left hand side is processed. Also - // rewrite with modifier. 
- errs = rewriteDeclaredVarsInTermRecursive(g, stack, expr.Operand(1), errs, strict) - - for _, w := range expr.With { - errs = rewriteDeclaredVarsInTermRecursive(g, stack, w.Value, errs, strict) - } - - // Rewrite vars on left hand side with unique names. Catch redeclaration - // and invalid term types here. - var vis func(t *Term) bool - - vis = func(t *Term) bool { - switch v := t.Value.(type) { - case Var: - if gv, err := rewriteDeclaredVar(g, stack, v, assignedVar); err != nil { - errs = append(errs, NewError(CompileErr, t.Location, err.Error())) - } else { - t.Value = gv - } - return true - case *Array: - return false - case *object: - v.Foreach(func(_, v *Term) { - WalkTerms(v, vis) - }) - return true - case Ref: - if RootDocumentRefs.Contains(t) { - if gv, err := rewriteDeclaredVar(g, stack, v[0].Value.(Var), assignedVar); err != nil { - errs = append(errs, NewError(CompileErr, t.Location, err.Error())) - } else { - t.Value = gv - } - return true - } - } - errs = append(errs, NewError(CompileErr, t.Location, "cannot assign to %v", TypeName(t.Value))) - return true - } - - WalkTerms(expr.Operand(0), vis) - - if len(errs) == numErrsBefore { - loc := expr.Operator()[0].Location - expr.SetOperator(RefTerm(VarTerm(Equality.Name).SetLocation(loc)).SetLocation(loc)) - } - - return expr, errs -} - -func rewriteDeclaredVarsInTerm(g *localVarGenerator, stack *localDeclaredVars, term *Term, errs Errors, strict bool) (bool, Errors) { - switch v := term.Value.(type) { - case Var: - if gv, ok := stack.Declared(v); ok { - term.Value = gv - stack.Seen(v) - } else if stack.Occurrence(v) == newVar { - stack.Insert(v, v, seenVar) - } - case Ref: - if RootDocumentRefs.Contains(term) { - x := v[0].Value.(Var) - if occ, ok := stack.GlobalOccurrence(x); ok && occ != seenVar { - gv, _ := stack.Declared(x) - term.Value = gv - } - - return true, errs - } - return false, errs - case Call: - ref := v[0] - WalkVars(ref, func(v Var) bool { - if gv, ok := stack.Declared(v); ok && !gv.Equal(v) { - // We will rewrite the ref of a function call, which is never ok since we don't have first-class functions. 
- errs = append(errs, NewError(CompileErr, term.Location, "called function %s shadowed", ref)) - return true - } - return false - }) - return false, errs - case *object: - cpy, _ := v.Map(func(k, v *Term) (*Term, *Term, error) { - kcpy := k.Copy() - errs = rewriteDeclaredVarsInTermRecursive(g, stack, kcpy, errs, strict) - errs = rewriteDeclaredVarsInTermRecursive(g, stack, v, errs, strict) - return kcpy, v, nil - }) - term.Value = cpy - case Set: - cpy, _ := v.Map(func(elem *Term) (*Term, error) { - elemcpy := elem.Copy() - errs = rewriteDeclaredVarsInTermRecursive(g, stack, elemcpy, errs, strict) - return elemcpy, nil - }) - term.Value = cpy - case *ArrayComprehension: - errs = rewriteDeclaredVarsInArrayComprehension(g, stack, v, errs, strict) - case *SetComprehension: - errs = rewriteDeclaredVarsInSetComprehension(g, stack, v, errs, strict) - case *ObjectComprehension: - errs = rewriteDeclaredVarsInObjectComprehension(g, stack, v, errs, strict) - default: - return false, errs - } - return true, errs -} - -func rewriteDeclaredVarsInTermRecursive(g *localVarGenerator, stack *localDeclaredVars, term *Term, errs Errors, strict bool) Errors { - WalkTerms(term, func(t *Term) bool { - var stop bool - stop, errs = rewriteDeclaredVarsInTerm(g, stack, t, errs, strict) - return stop - }) - return errs -} - -func rewriteDeclaredVarsInWithRecursive(g *localVarGenerator, stack *localDeclaredVars, w *With, errs Errors, strict bool) Errors { - // NOTE(sr): `with input as` and `with input.a.b.c as` are deliberately skipped here: `input` could - // have been shadowed by a local variable/argument but should NOT be replaced in the `with` target. - // - // We cannot drop `input` from the stack since it's conceivable to do `with input[input] as` where - // the second input is meant to be the local var. It's a terrible idea, but when you're shadowing - // `input` those might be your thing. - errs = rewriteDeclaredVarsInTermRecursive(g, stack, w.Target, errs, strict) - if sdwInput, ok := stack.Declared(InputRootDocument.Value.(Var)); ok { // Was "input" shadowed... - switch value := w.Target.Value.(type) { - case Var: - if sdwInput.Equal(value) { // ...and replaced? 
If so, fix it - w.Target.Value = InputRootRef - } - case Ref: - if sdwInput.Equal(value[0].Value.(Var)) { - w.Target.Value.(Ref)[0].Value = InputRootDocument.Value - } - } - } - // No special handling of the `with` value - return rewriteDeclaredVarsInTermRecursive(g, stack, w.Value, errs, strict) -} - -func rewriteDeclaredVarsInArrayComprehension(g *localVarGenerator, stack *localDeclaredVars, v *ArrayComprehension, errs Errors, strict bool) Errors { - used := NewVarSet() - used.Update(v.Term.Vars()) - - stack.Push() - v.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, v.Body, errs, strict) - errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Term, errs, strict) - stack.Pop() - return errs -} - -func rewriteDeclaredVarsInSetComprehension(g *localVarGenerator, stack *localDeclaredVars, v *SetComprehension, errs Errors, strict bool) Errors { - used := NewVarSet() - used.Update(v.Term.Vars()) - - stack.Push() - v.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, v.Body, errs, strict) - errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Term, errs, strict) - stack.Pop() - return errs -} - -func rewriteDeclaredVarsInObjectComprehension(g *localVarGenerator, stack *localDeclaredVars, v *ObjectComprehension, errs Errors, strict bool) Errors { - used := NewVarSet() - used.Update(v.Key.Vars()) - used.Update(v.Value.Vars()) - - stack.Push() - v.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, v.Body, errs, strict) - errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Key, errs, strict) - errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Value, errs, strict) - stack.Pop() - return errs -} - -func rewriteDeclaredVar(g *localVarGenerator, stack *localDeclaredVars, v Var, occ varOccurrence) (gv Var, err error) { - switch stack.Occurrence(v) { - case seenVar: - return gv, fmt.Errorf("var %v referenced above", v) - case assignedVar: - return gv, fmt.Errorf("var %v assigned above", v) - case declaredVar: - return gv, fmt.Errorf("var %v declared above", v) - case argVar: - return gv, fmt.Errorf("arg %v redeclared", v) - } - gv = g.Generate() - stack.Insert(v, gv, occ) - return -} - -// rewriteWithModifiersInBody will rewrite the body so that with modifiers do -// not contain terms that require evaluation as values. If this function -// encounters an invalid with modifier target then it will raise an error. 
-func rewriteWithModifiersInBody(c *Compiler, unsafeBuiltinsMap map[string]struct{}, f *equalityFactory, body Body) (Body, *Error) { - var result Body - for i := range body { - exprs, err := rewriteWithModifier(c, unsafeBuiltinsMap, f, body[i]) - if err != nil { - return nil, err - } - if len(exprs) > 0 { - for _, expr := range exprs { - result.Append(expr) - } - } else { - result.Append(body[i]) - } - } - return result, nil -} - -func rewriteWithModifier(c *Compiler, unsafeBuiltinsMap map[string]struct{}, f *equalityFactory, expr *Expr) ([]*Expr, *Error) { - - var result []*Expr - for i := range expr.With { - eval, err := validateWith(c, unsafeBuiltinsMap, expr, i) - if err != nil { - return nil, err - } - - if eval { - eq := f.Generate(expr.With[i].Value) - result = append(result, eq) - expr.With[i].Value = eq.Operand(0) - } - } - - return append(result, expr), nil -} - -func validateWith(c *Compiler, unsafeBuiltinsMap map[string]struct{}, expr *Expr, i int) (bool, *Error) { - target, value := expr.With[i].Target, expr.With[i].Value - - // Ensure that values that are built-ins are rewritten to Ref (not Var) - if v, ok := value.Value.(Var); ok { - if _, ok := c.builtins[v.String()]; ok { - value.Value = Ref([]*Term{NewTerm(v)}) - } - } - isBuiltinRefOrVar, err := isBuiltinRefOrVar(c.builtins, unsafeBuiltinsMap, target) - if err != nil { - return false, err - } - - isAllowedUnknownFuncCall := false - if c.allowUndefinedFuncCalls { - switch target.Value.(type) { - case Ref, Var: - isAllowedUnknownFuncCall = true - } - } - - switch { - case isDataRef(target): - ref := target.Value.(Ref) - targetNode := c.RuleTree - for i := 0; i < len(ref)-1; i++ { - child := targetNode.Child(ref[i].Value) - if child == nil { - break - } else if len(child.Values) > 0 { - return false, NewError(CompileErr, target.Loc(), "with keyword cannot partially replace virtual document(s)") - } - targetNode = child - } - - if targetNode != nil { - // NOTE(sr): at this point in the compiler stages, we don't have a fully-populated - // TypeEnv yet -- so we have to make do with this check to see if the replacement - // target is a function. It's probably wrong for arity-0 functions, but those are - // and edge case anyways. - if child := targetNode.Child(ref[len(ref)-1].Value); child != nil { - for _, v := range child.Values { - if len(v.(*Rule).Head.Args) > 0 { - if ok, err := validateWithFunctionValue(c.builtins, unsafeBuiltinsMap, c.RuleTree, value); err != nil || ok { - return false, err // err may be nil - } - } - } - } - } - - // If the with-value is a ref to a function, but not a call, we can't rewrite it - if r, ok := value.Value.(Ref); ok { - // TODO: check that target ref doesn't exist? 
- if valueNode := c.RuleTree.Find(r); valueNode != nil { - for _, v := range valueNode.Values { - if len(v.(*Rule).Head.Args) > 0 { - return false, nil - } - } - } - } - case isInputRef(target): // ok, valid - case isBuiltinRefOrVar: - - // NOTE(sr): first we ensure that parsed Var builtins (`count`, `concat`, etc) - // are rewritten to their proper Ref convention - if v, ok := target.Value.(Var); ok { - target.Value = Ref([]*Term{NewTerm(v)}) - } - - targetRef := target.Value.(Ref) - bi := c.builtins[targetRef.String()] // safe because isBuiltinRefOrVar checked this - if err := validateWithBuiltinTarget(bi, targetRef, target.Loc()); err != nil { - return false, err - } - - if ok, err := validateWithFunctionValue(c.builtins, unsafeBuiltinsMap, c.RuleTree, value); err != nil || ok { - return false, err // err may be nil - } - case isAllowedUnknownFuncCall: - // The target isn't a ref to the input doc, data doc, or a known built-in, but it might be a ref to an unknown built-in. - return false, nil - default: - return false, NewError(TypeErr, target.Location, "with keyword target must reference existing %v, %v, or a function", InputRootDocument, DefaultRootDocument) - } - return requiresEval(value), nil -} - -func validateWithBuiltinTarget(bi *Builtin, target Ref, loc *location.Location) *Error { - switch bi.Name { - case Equality.Name, - RegoMetadataChain.Name, - RegoMetadataRule.Name: - return NewError(CompileErr, loc, "with keyword replacing built-in function: replacement of %q invalid", bi.Name) - } - - switch { - case target.HasPrefix(Ref([]*Term{VarTerm("internal")})): - return NewError(CompileErr, loc, "with keyword replacing built-in function: replacement of internal function %q invalid", target) - - case bi.Relation: - return NewError(CompileErr, loc, "with keyword replacing built-in function: target must not be a relation") - - case bi.Decl.Result() == nil: - return NewError(CompileErr, loc, "with keyword replacing built-in function: target must not be a void function") - } - return nil -} - -func validateWithFunctionValue(bs map[string]*Builtin, unsafeMap map[string]struct{}, ruleTree *TreeNode, value *Term) (bool, *Error) { - if v, ok := value.Value.(Ref); ok { - if ruleTree.Find(v) != nil { // ref exists in rule tree - return true, nil - } - } - return isBuiltinRefOrVar(bs, unsafeMap, value) -} - -func isInputRef(term *Term) bool { - if ref, ok := term.Value.(Ref); ok { - if ref.HasPrefix(InputRootRef) { - return true - } - } - return false -} - -func isDataRef(term *Term) bool { - if ref, ok := term.Value.(Ref); ok { - if ref.HasPrefix(DefaultRootRef) { - return true - } - } - return false -} - -func isBuiltinRefOrVar(bs map[string]*Builtin, unsafeBuiltinsMap map[string]struct{}, term *Term) (bool, *Error) { - switch v := term.Value.(type) { - case Ref, Var: - if _, ok := unsafeBuiltinsMap[v.String()]; ok { - return false, NewError(CompileErr, term.Location, "with keyword replacing built-in function: target must not be unsafe: %q", v) - } - _, ok := bs[v.String()] - return ok, nil - } - return false, nil -} - -func isVirtual(node *TreeNode, ref Ref) bool { - for i := range ref { - child := node.Child(ref[i].Value) - if child == nil { - return false - } else if len(child.Values) > 0 { - return true - } - node = child - } - return true -} - -func safetyErrorSlice(unsafe unsafeVars, rewritten map[Var]Var) (result Errors) { - - if len(unsafe) == 0 { - return - } - - for _, pair := range unsafe.Vars() { - v := pair.Var - if w, ok := rewritten[v]; ok { - v = w - } - if 
!v.IsGenerated() { - if _, ok := futureKeywords[string(v)]; ok { - result = append(result, NewError(UnsafeVarErr, pair.Loc, - "var %[1]v is unsafe (hint: `import future.keywords.%[1]v` to import a future keyword)", v)) - continue - } - result = append(result, NewError(UnsafeVarErr, pair.Loc, "var %v is unsafe", v)) - } - } - - if len(result) > 0 { - return - } - - // If the expression contains unsafe generated variables, report which - // expressions are unsafe instead of the variables that are unsafe (since - // the latter are not meaningful to the user.) - pairs := unsafe.Slice() - - sort.Slice(pairs, func(i, j int) bool { - return pairs[i].Expr.Location.Compare(pairs[j].Expr.Location) < 0 - }) - - // Report at most one error per generated variable. - seen := NewVarSet() - - for _, expr := range pairs { - before := len(seen) - for v := range expr.Vars { - if v.IsGenerated() { - seen.Add(v) - } - } - if len(seen) > before { - result = append(result, NewError(UnsafeVarErr, expr.Expr.Location, "expression is unsafe")) - } - } - - return -} - -func checkUnsafeBuiltins(unsafeBuiltinsMap map[string]struct{}, node interface{}) Errors { - errs := make(Errors, 0) - WalkExprs(node, func(x *Expr) bool { - if x.IsCall() { - operator := x.Operator().String() - if _, ok := unsafeBuiltinsMap[operator]; ok { - errs = append(errs, NewError(TypeErr, x.Loc(), "unsafe built-in function calls in expression: %v", operator)) - } - } - return false - }) - return errs -} - -func rewriteVarsInRef(vars ...map[Var]Var) varRewriter { - return func(node Ref) Ref { - i, _ := TransformVars(node, func(v Var) (Value, error) { - for _, m := range vars { - if u, ok := m[v]; ok { - return u, nil - } - } - return v, nil - }) - return i.(Ref) - } -} - -// NOTE(sr): This is duplicated with compile/compile.go; but moving it into another location -// would cause a circular dependency -- the refSet definition needs ast.Ref. If we make it -// public in the ast package, the compile package could take it from there, but it would also -// increase our public interface. Let's reconsider if we need it in a third place. -type refSet struct { - s []Ref -} - -func newRefSet(x ...Ref) *refSet { - result := &refSet{} - for i := range x { - result.AddPrefix(x[i]) - } - return result -} - -// ContainsPrefix returns true if r is prefixed by any of the existing refs in the set. -func (rs *refSet) ContainsPrefix(r Ref) bool { - for i := range rs.s { - if r.HasPrefix(rs.s[i]) { - return true - } - } - return false -} - -// AddPrefix inserts r into the set if r is not prefixed by any existing -// refs in the set. If any existing refs are prefixed by r, those existing -// refs are removed. -func (rs *refSet) AddPrefix(r Ref) { - if rs.ContainsPrefix(r) { - return - } - cpy := []Ref{r} - for i := range rs.s { - if !rs.s[i].HasPrefix(r) { - cpy = append(cpy, rs.s[i]) - } - } - rs.s = cpy -} - -// Sorted returns a sorted slice of terms for refs in the set. -func (rs *refSet) Sorted() []*Term { - terms := make([]*Term, len(rs.s)) - for i := range rs.s { - terms[i] = NewTerm(rs.s[i]) - } - sort.Slice(terms, func(i, j int) bool { - return terms[i].Value.Compare(terms[j].Value) < 0 - }) - return terms +// OutputVarsFromExpr returns all variables which are the "output" for +// the given expression. For safety checks this means that they would be +// made safe by the expr. 
+func OutputVarsFromExpr(c *Compiler, expr *Expr, safe VarSet) VarSet { + return v1.OutputVarsFromExpr(c, expr, safe) } diff --git a/vendor/github.com/open-policy-agent/opa/ast/compilehelper.go b/vendor/github.com/open-policy-agent/opa/ast/compilehelper.go index dd48884f9d5..37ede329ea4 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/compilehelper.go +++ b/vendor/github.com/open-policy-agent/opa/ast/compilehelper.go @@ -4,41 +4,29 @@ package ast +import v1 "github.com/open-policy-agent/opa/v1/ast" + // CompileModules takes a set of Rego modules represented as strings and // compiles them for evaluation. The keys of the map are used as filenames. func CompileModules(modules map[string]string) (*Compiler, error) { - return CompileModulesWithOpt(modules, CompileOpts{}) + return CompileModulesWithOpt(modules, CompileOpts{ + ParserOptions: ParserOptions{ + RegoVersion: DefaultRegoVersion, + }, + }) } // CompileOpts defines a set of options for the compiler. -type CompileOpts struct { - EnablePrintStatements bool - ParserOptions ParserOptions -} +type CompileOpts = v1.CompileOpts // CompileModulesWithOpt takes a set of Rego modules represented as strings and // compiles them for evaluation. The keys of the map are used as filenames. func CompileModulesWithOpt(modules map[string]string, opts CompileOpts) (*Compiler, error) { - - parsed := make(map[string]*Module, len(modules)) - - for f, module := range modules { - var pm *Module - var err error - if pm, err = ParseModuleWithOpts(f, module, opts.ParserOptions); err != nil { - return nil, err - } - parsed[f] = pm - } - - compiler := NewCompiler().WithEnablePrintStatements(opts.EnablePrintStatements) - compiler.Compile(parsed) - - if compiler.Failed() { - return nil, compiler.Errors + if opts.ParserOptions.RegoVersion == RegoUndefined { + opts.ParserOptions.RegoVersion = DefaultRegoVersion } - return compiler, nil + return v1.CompileModulesWithOpt(modules, opts) } // MustCompileModules compiles a set of Rego modules represented as strings. If diff --git a/vendor/github.com/open-policy-agent/opa/ast/conflicts.go b/vendor/github.com/open-policy-agent/opa/ast/conflicts.go index c2713ad576f..10edce382c3 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/conflicts.go +++ b/vendor/github.com/open-policy-agent/opa/ast/conflicts.go @@ -5,49 +5,11 @@ package ast import ( - "strings" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // CheckPathConflicts returns a set of errors indicating paths that // are in conflict with the result of the provided callable. func CheckPathConflicts(c *Compiler, exists func([]string) (bool, error)) Errors { - var errs Errors - - root := c.RuleTree.Child(DefaultRootDocument.Value) - if root == nil { - return nil - } - - for _, node := range root.Children { - errs = append(errs, checkDocumentConflicts(node, exists, nil)...) 
- } - - return errs -} - -func checkDocumentConflicts(node *TreeNode, exists func([]string) (bool, error), path []string) Errors { - - switch key := node.Key.(type) { - case String: - path = append(path, string(key)) - default: // other key types cannot conflict with data - return nil - } - - if len(node.Values) > 0 { - s := strings.Join(path, "/") - if ok, err := exists(path); err != nil { - return Errors{NewError(CompileErr, node.Values[0].(*Rule).Loc(), "conflict check for data path %v: %v", s, err.Error())} - } else if ok { - return Errors{NewError(CompileErr, node.Values[0].(*Rule).Loc(), "conflicting rule for data path %v found", s)} - } - } - - var errs Errors - - for _, child := range node.Children { - errs = append(errs, checkDocumentConflicts(child, exists, path)...) - } - - return errs + return v1.CheckPathConflicts(c, exists) } diff --git a/vendor/github.com/open-policy-agent/opa/ast/doc.go b/vendor/github.com/open-policy-agent/opa/ast/doc.go index 62b04e301ee..ba974e5ba60 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/doc.go +++ b/vendor/github.com/open-policy-agent/opa/ast/doc.go @@ -1,36 +1,8 @@ -// Copyright 2016 The OPA Authors. All rights reserved. +// Copyright 2024 The OPA Authors. All rights reserved. // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. -// Package ast declares Rego syntax tree types and also includes a parser and compiler for preparing policies for execution in the policy engine. -// -// Rego policies are defined using a relatively small set of types: modules, package and import declarations, rules, expressions, and terms. At their core, policies consist of rules that are defined by one or more expressions over documents available to the policy engine. The expressions are defined by intrinsic values (terms) such as strings, objects, variables, etc. -// -// Rego policies are typically defined in text files and then parsed and compiled by the policy engine at runtime. The parsing stage takes the text or string representation of the policy and converts it into an abstract syntax tree (AST) that consists of the types mentioned above. The AST is organized as follows: -// -// Module -// | -// +--- Package (Reference) -// | -// +--- Imports -// | | -// | +--- Import (Term) -// | -// +--- Rules -// | -// +--- Rule -// | -// +--- Head -// | | -// | +--- Name (Variable) -// | | -// | +--- Key (Term) -// | | -// | +--- Value (Term) -// | -// +--- Body -// | -// +--- Expression (Term | Terms | Variable Declaration) -// -// At query time, the policy engine expects policies to have been compiled. The compilation stage takes one or more modules and compiles them into a format that the policy engine supports. +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. 
package ast diff --git a/vendor/github.com/open-policy-agent/opa/ast/env.go b/vendor/github.com/open-policy-agent/opa/ast/env.go index c767aafefb6..ef0ccf89ce8 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/env.go +++ b/vendor/github.com/open-policy-agent/opa/ast/env.go @@ -5,522 +5,8 @@ package ast import ( - "fmt" - "strings" - - "github.com/open-policy-agent/opa/types" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // TypeEnv contains type info for static analysis such as type checking. -type TypeEnv struct { - tree *typeTreeNode - next *TypeEnv - newChecker func() *typeChecker -} - -// newTypeEnv returns an empty TypeEnv. The constructor is not exported because -// type environments should only be created by the type checker. -func newTypeEnv(f func() *typeChecker) *TypeEnv { - return &TypeEnv{ - tree: newTypeTree(), - newChecker: f, - } -} - -// Get returns the type of x. -func (env *TypeEnv) Get(x interface{}) types.Type { - - if term, ok := x.(*Term); ok { - x = term.Value - } - - switch x := x.(type) { - - // Scalars. - case Null: - return types.NewNull() - case Boolean: - return types.NewBoolean() - case Number: - return types.NewNumber() - case String: - return types.NewString() - - // Composites. - case *Array: - static := make([]types.Type, x.Len()) - for i := range static { - tpe := env.Get(x.Elem(i).Value) - static[i] = tpe - } - - var dynamic types.Type - if len(static) == 0 { - dynamic = types.A - } - - return types.NewArray(static, dynamic) - - case *lazyObj: - return env.Get(x.force()) - case *object: - static := []*types.StaticProperty{} - var dynamic *types.DynamicProperty - - x.Foreach(func(k, v *Term) { - if IsConstant(k.Value) { - kjson, err := JSON(k.Value) - if err == nil { - tpe := env.Get(v) - static = append(static, types.NewStaticProperty(kjson, tpe)) - return - } - } - // Can't handle it as a static property, fallback to dynamic - typeK := env.Get(k.Value) - typeV := env.Get(v.Value) - dynamic = types.NewDynamicProperty(typeK, typeV) - }) - - if len(static) == 0 && dynamic == nil { - dynamic = types.NewDynamicProperty(types.A, types.A) - } - - return types.NewObject(static, dynamic) - - case Set: - var tpe types.Type - x.Foreach(func(elem *Term) { - other := env.Get(elem.Value) - tpe = types.Or(tpe, other) - }) - if tpe == nil { - tpe = types.A - } - return types.NewSet(tpe) - - // Comprehensions. - case *ArrayComprehension: - cpy, errs := env.newChecker().CheckBody(env, x.Body) - if len(errs) == 0 { - return types.NewArray(nil, cpy.Get(x.Term)) - } - return nil - case *ObjectComprehension: - cpy, errs := env.newChecker().CheckBody(env, x.Body) - if len(errs) == 0 { - return types.NewObject(nil, types.NewDynamicProperty(cpy.Get(x.Key), cpy.Get(x.Value))) - } - return nil - case *SetComprehension: - cpy, errs := env.newChecker().CheckBody(env, x.Body) - if len(errs) == 0 { - return types.NewSet(cpy.Get(x.Term)) - } - return nil - - // Refs. - case Ref: - return env.getRef(x) - - // Vars. - case Var: - if node := env.tree.Child(x); node != nil { - return node.Value() - } - if env.next != nil { - return env.next.Get(x) - } - return nil - - // Calls. 
- case Call: - return nil - - default: - panic("unreachable") - } -} - -func (env *TypeEnv) getRef(ref Ref) types.Type { - - node := env.tree.Child(ref[0].Value) - if node == nil { - return env.getRefFallback(ref) - } - - return env.getRefRec(node, ref, ref[1:]) -} - -func (env *TypeEnv) getRefFallback(ref Ref) types.Type { - - if env.next != nil { - return env.next.Get(ref) - } - - if RootDocumentNames.Contains(ref[0]) { - return types.A - } - - return nil -} - -func (env *TypeEnv) getRefRec(node *typeTreeNode, ref, tail Ref) types.Type { - if len(tail) == 0 { - return env.getRefRecExtent(node) - } - - if node.Leaf() { - if node.children.Len() > 0 { - if child := node.Child(tail[0].Value); child != nil { - return env.getRefRec(child, ref, tail[1:]) - } - } - return selectRef(node.Value(), tail) - } - - if !IsConstant(tail[0].Value) { - return selectRef(env.getRefRecExtent(node), tail) - } - - child := node.Child(tail[0].Value) - if child == nil { - return env.getRefFallback(ref) - } - - return env.getRefRec(child, ref, tail[1:]) -} - -func (env *TypeEnv) getRefRecExtent(node *typeTreeNode) types.Type { - - if node.Leaf() { - return node.Value() - } - - children := []*types.StaticProperty{} - - node.Children().Iter(func(k, v util.T) bool { - key := k.(Value) - child := v.(*typeTreeNode) - - tpe := env.getRefRecExtent(child) - - // NOTE(sr): Converting to Golang-native types here is an extension of what we did - // before -- only supporting strings. But since we cannot differentiate sets and arrays - // that way, we could reconsider. - switch key.(type) { - case String, Number, Boolean: // skip anything else - propKey, err := JSON(key) - if err != nil { - panic(fmt.Errorf("unreachable, ValueToInterface: %w", err)) - } - children = append(children, types.NewStaticProperty(propKey, tpe)) - } - return false - }) - - // TODO(tsandall): for now, these objects can have any dynamic properties - // because we don't have schema for base docs. Once schemas are supported - // we can improve this. - return types.NewObject(children, types.NewDynamicProperty(types.S, types.A)) -} - -func (env *TypeEnv) wrap() *TypeEnv { - cpy := *env - cpy.next = env - cpy.tree = newTypeTree() - return &cpy -} - -// typeTreeNode is used to store type information in a tree. 
-type typeTreeNode struct { - key Value - value types.Type - children *util.HashMap -} - -func newTypeTree() *typeTreeNode { - return &typeTreeNode{ - key: nil, - value: nil, - children: util.NewHashMap(valueEq, valueHash), - } -} - -func (n *typeTreeNode) Child(key Value) *typeTreeNode { - value, ok := n.children.Get(key) - if !ok { - return nil - } - return value.(*typeTreeNode) -} - -func (n *typeTreeNode) Children() *util.HashMap { - return n.children -} - -func (n *typeTreeNode) Get(path Ref) types.Type { - curr := n - for _, term := range path { - child, ok := curr.children.Get(term.Value) - if !ok { - return nil - } - curr = child.(*typeTreeNode) - } - return curr.Value() -} - -func (n *typeTreeNode) Leaf() bool { - return n.value != nil -} - -func (n *typeTreeNode) PutOne(key Value, tpe types.Type) { - c, ok := n.children.Get(key) - - var child *typeTreeNode - if !ok { - child = newTypeTree() - child.key = key - n.children.Put(key, child) - } else { - child = c.(*typeTreeNode) - } - - child.value = tpe -} - -func (n *typeTreeNode) Put(path Ref, tpe types.Type) { - curr := n - for _, term := range path { - c, ok := curr.children.Get(term.Value) - - var child *typeTreeNode - if !ok { - child = newTypeTree() - child.key = term.Value - curr.children.Put(child.key, child) - } else { - child = c.(*typeTreeNode) - } - - curr = child - } - curr.value = tpe -} - -// Insert inserts tpe at path in the tree, but also merges the value into any types.Object present along that path. -// If a types.Object is inserted, any leafs already present further down the tree are merged into the inserted object. -// path must be ground. -func (n *typeTreeNode) Insert(path Ref, tpe types.Type, env *TypeEnv) { - curr := n - for i, term := range path { - c, ok := curr.children.Get(term.Value) - - var child *typeTreeNode - if !ok { - child = newTypeTree() - child.key = term.Value - curr.children.Put(child.key, child) - } else { - child = c.(*typeTreeNode) - - if child.value != nil && i+1 < len(path) { - // If child has an object value, merge the new value into it. - if o, ok := child.value.(*types.Object); ok { - var err error - child.value, err = insertIntoObject(o, path[i+1:], tpe, env) - if err != nil { - panic(fmt.Errorf("unreachable, insertIntoObject: %w", err)) - } - } - } - } - - curr = child - } - - curr.value = mergeTypes(curr.value, tpe) - - if _, ok := tpe.(*types.Object); ok && curr.children.Len() > 0 { - // merge all leafs into the inserted object - leafs := curr.Leafs() - for p, t := range leafs { - var err error - curr.value, err = insertIntoObject(curr.value.(*types.Object), *p, t, env) - if err != nil { - panic(fmt.Errorf("unreachable, insertIntoObject: %w", err)) - } - } - } -} - -// mergeTypes merges the types of 'a' and 'b'. If both are sets, their 'of' types are joined with an types.Or. -// If both are objects, the key types of their dynamic properties are joined with types.Or:s, and their value types -// are recursively merged (using mergeTypes). -// If 'a' and 'b' are both objects, and at least one of them have static properties, they are joined -// with an types.Or, instead of being merged. -// If 'a' is an Any containing an Object, and 'b' is an Object (or vice versa); AND both objects have no -// static properties, they are merged. -// If 'a' and 'b' are different types, they are joined with an types.Or. 
-func mergeTypes(a, b types.Type) types.Type { - if a == nil { - return b - } - - if b == nil { - return a - } - - switch a := a.(type) { - case *types.Object: - if bObj, ok := b.(*types.Object); ok && len(a.StaticProperties()) == 0 && len(bObj.StaticProperties()) == 0 { - if len(a.StaticProperties()) > 0 || len(bObj.StaticProperties()) > 0 { - return types.Or(a, bObj) - } - - aDynProps := a.DynamicProperties() - bDynProps := bObj.DynamicProperties() - dynProps := types.NewDynamicProperty( - types.Or(aDynProps.Key, bDynProps.Key), - mergeTypes(aDynProps.Value, bDynProps.Value)) - return types.NewObject(nil, dynProps) - } else if bAny, ok := b.(types.Any); ok && len(a.StaticProperties()) == 0 { - // If a is an object type with no static components ... - for _, t := range bAny { - if tObj, ok := t.(*types.Object); ok && len(tObj.StaticProperties()) == 0 { - // ... and b is a types.Any containing an object with no static components, we merge them. - aDynProps := a.DynamicProperties() - tDynProps := tObj.DynamicProperties() - tDynProps.Key = types.Or(tDynProps.Key, aDynProps.Key) - tDynProps.Value = types.Or(tDynProps.Value, aDynProps.Value) - return bAny - } - } - } - case *types.Set: - if bSet, ok := b.(*types.Set); ok { - return types.NewSet(types.Or(a.Of(), bSet.Of())) - } - case types.Any: - if _, ok := b.(types.Any); !ok { - return mergeTypes(b, a) - } - } - - return types.Or(a, b) -} - -func (n *typeTreeNode) String() string { - b := strings.Builder{} - - if k := n.key; k != nil { - b.WriteString(k.String()) - } else { - b.WriteString("-") - } - - if v := n.value; v != nil { - b.WriteString(": ") - b.WriteString(v.String()) - } - - n.children.Iter(func(_, v util.T) bool { - if child, ok := v.(*typeTreeNode); ok { - b.WriteString("\n\t+ ") - s := child.String() - s = strings.ReplaceAll(s, "\n", "\n\t") - b.WriteString(s) - } - return false - }) - - return b.String() -} - -func insertIntoObject(o *types.Object, path Ref, tpe types.Type, env *TypeEnv) (*types.Object, error) { - if len(path) == 0 { - return o, nil - } - - key := env.Get(path[0].Value) - - if len(path) == 1 { - var dynamicProps *types.DynamicProperty - if dp := o.DynamicProperties(); dp != nil { - dynamicProps = types.NewDynamicProperty(types.Or(o.DynamicProperties().Key, key), types.Or(o.DynamicProperties().Value, tpe)) - } else { - dynamicProps = types.NewDynamicProperty(key, tpe) - } - return types.NewObject(o.StaticProperties(), dynamicProps), nil - } - - child, err := insertIntoObject(types.NewObject(nil, nil), path[1:], tpe, env) - if err != nil { - return nil, err - } - - var dynamicProps *types.DynamicProperty - if dp := o.DynamicProperties(); dp != nil { - dynamicProps = types.NewDynamicProperty(types.Or(o.DynamicProperties().Key, key), types.Or(o.DynamicProperties().Value, child)) - } else { - dynamicProps = types.NewDynamicProperty(key, child) - } - return types.NewObject(o.StaticProperties(), dynamicProps), nil -} - -func (n *typeTreeNode) Leafs() map[*Ref]types.Type { - leafs := map[*Ref]types.Type{} - n.children.Iter(func(_, v util.T) bool { - collectLeafs(v.(*typeTreeNode), nil, leafs) - return false - }) - return leafs -} - -func collectLeafs(n *typeTreeNode, path Ref, leafs map[*Ref]types.Type) { - nPath := append(path, NewTerm(n.key)) - if n.Leaf() { - leafs[&nPath] = n.Value() - return - } - n.children.Iter(func(_, v util.T) bool { - collectLeafs(v.(*typeTreeNode), nPath, leafs) - return false - }) -} - -func (n *typeTreeNode) Value() types.Type { - return n.value -} - -// selectConstant returns the 
attribute of the type referred to by the term. If -// the attribute type cannot be determined, nil is returned. -func selectConstant(tpe types.Type, term *Term) types.Type { - x, err := JSON(term.Value) - if err == nil { - return types.Select(tpe, x) - } - return nil -} - -// selectRef returns the type of the nested attribute referred to by ref. If -// the attribute type cannot be determined, nil is returned. If the ref -// contains vars or refs, then the returned type will be a union of the -// possible types. -func selectRef(tpe types.Type, ref Ref) types.Type { - - if tpe == nil || len(ref) == 0 { - return tpe - } - - head, tail := ref[0], ref[1:] - - switch head.Value.(type) { - case Var, Ref, *Array, Object, Set: - return selectRef(types.Values(tpe), tail) - default: - return selectRef(selectConstant(tpe, head), tail) - } -} +type TypeEnv = v1.TypeEnv diff --git a/vendor/github.com/open-policy-agent/opa/ast/errors.go b/vendor/github.com/open-policy-agent/opa/ast/errors.go index 066dfcdd68c..0cb8ee28f70 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/errors.go +++ b/vendor/github.com/open-policy-agent/opa/ast/errors.go @@ -5,119 +5,42 @@ package ast import ( - "fmt" - "sort" - "strings" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // Errors represents a series of errors encountered during parsing, compiling, // etc. -type Errors []*Error - -func (e Errors) Error() string { - - if len(e) == 0 { - return "no error(s)" - } - - if len(e) == 1 { - return fmt.Sprintf("1 error occurred: %v", e[0].Error()) - } - - s := make([]string, len(e)) - for i, err := range e { - s[i] = err.Error() - } - - return fmt.Sprintf("%d errors occurred:\n%s", len(e), strings.Join(s, "\n")) -} - -// Sort sorts the error slice by location. If the locations are equal then the -// error message is compared. -func (e Errors) Sort() { - sort.Slice(e, func(i, j int) bool { - a := e[i] - b := e[j] - - if cmp := a.Location.Compare(b.Location); cmp != 0 { - return cmp < 0 - } - - return a.Error() < b.Error() - }) -} +type Errors = v1.Errors const ( // ParseErr indicates an unclassified parse error occurred. - ParseErr = "rego_parse_error" + ParseErr = v1.ParseErr // CompileErr indicates an unclassified compile error occurred. - CompileErr = "rego_compile_error" + CompileErr = v1.CompileErr // TypeErr indicates a type error was caught. - TypeErr = "rego_type_error" + TypeErr = v1.TypeErr // UnsafeVarErr indicates an unsafe variable was found during compilation. - UnsafeVarErr = "rego_unsafe_var_error" + UnsafeVarErr = v1.UnsafeVarErr // RecursionErr indicates recursion was found during compilation. - RecursionErr = "rego_recursion_error" + RecursionErr = v1.RecursionErr ) // IsError returns true if err is an AST error with code. func IsError(code string, err error) bool { - if err, ok := err.(*Error); ok { - return err.Code == code - } - return false + return v1.IsError(code, err) } // ErrorDetails defines the interface for detailed error messages. -type ErrorDetails interface { - Lines() []string -} +type ErrorDetails = v1.ErrorDetails // Error represents a single error caught during parsing, compiling, etc. 
-type Error struct { - Code string `json:"code"` - Message string `json:"message"` - Location *Location `json:"location,omitempty"` - Details ErrorDetails `json:"details,omitempty"` -} - -func (e *Error) Error() string { - - var prefix string - - if e.Location != nil { - - if len(e.Location.File) > 0 { - prefix += e.Location.File + ":" + fmt.Sprint(e.Location.Row) - } else { - prefix += fmt.Sprint(e.Location.Row) + ":" + fmt.Sprint(e.Location.Col) - } - } - - msg := fmt.Sprintf("%v: %v", e.Code, e.Message) - - if len(prefix) > 0 { - msg = prefix + ": " + msg - } - - if e.Details != nil { - for _, line := range e.Details.Lines() { - msg += "\n\t" + line - } - } - - return msg -} +type Error = v1.Error // NewError returns a new Error object. func NewError(code string, loc *Location, f string, a ...interface{}) *Error { - return &Error{ - Code: code, - Location: loc, - Message: fmt.Sprintf(f, a...), - } + return v1.NewError(code, loc, f, a...) } diff --git a/vendor/github.com/open-policy-agent/opa/ast/index.go b/vendor/github.com/open-policy-agent/opa/ast/index.go index cb0cbea323d..7e80bb7716c 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/index.go +++ b/vendor/github.com/open-policy-agent/opa/ast/index.go @@ -5,904 +5,16 @@ package ast import ( - "fmt" - "sort" - "strings" - - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // RuleIndex defines the interface for rule indices. -type RuleIndex interface { - - // Build tries to construct an index for the given rules. If the index was - // constructed, it returns true, otherwise false. - Build(rules []*Rule) bool - - // Lookup searches the index for rules that will match the provided - // resolver. If the resolver returns an error, it is returned via err. - Lookup(resolver ValueResolver) (*IndexResult, error) - - // AllRules traverses the index and returns all rules that will match - // the provided resolver without any optimizations (effectively with - // indexing disabled). If the resolver returns an error, it is returned - // via err. - AllRules(resolver ValueResolver) (*IndexResult, error) -} +type RuleIndex v1.RuleIndex // IndexResult contains the result of an index lookup. -type IndexResult struct { - Kind RuleKind - Rules []*Rule - Else map[*Rule][]*Rule - Default *Rule - EarlyExit bool - OnlyGroundRefs bool -} +type IndexResult = v1.IndexResult // NewIndexResult returns a new IndexResult object. func NewIndexResult(kind RuleKind) *IndexResult { - return &IndexResult{ - Kind: kind, - Else: map[*Rule][]*Rule{}, - } -} - -// Empty returns true if there are no rules to evaluate. -func (ir *IndexResult) Empty() bool { - return len(ir.Rules) == 0 && ir.Default == nil -} - -type baseDocEqIndex struct { - skipIndexing Set - isVirtual func(Ref) bool - root *trieNode - defaultRule *Rule - kind RuleKind - onlyGroundRefs bool -} - -func newBaseDocEqIndex(isVirtual func(Ref) bool) *baseDocEqIndex { - return &baseDocEqIndex{ - skipIndexing: NewSet(NewTerm(InternalPrint.Ref())), - isVirtual: isVirtual, - root: newTrieNodeImpl(), - onlyGroundRefs: true, - } -} - -func (i *baseDocEqIndex) Build(rules []*Rule) bool { - if len(rules) == 0 { - return false - } - - i.kind = rules[0].Head.RuleKind() - indices := newrefindices(i.isVirtual) - - // build indices for each rule. 
- for idx := range rules { - WalkRules(rules[idx], func(rule *Rule) bool { - if rule.Default { - i.defaultRule = rule - return false - } - if i.onlyGroundRefs { - i.onlyGroundRefs = rule.Head.Reference.IsGround() - } - var skip bool - for _, expr := range rule.Body { - if op := expr.OperatorTerm(); op != nil && i.skipIndexing.Contains(op) { - skip = true - break - } - } - if !skip { - for _, expr := range rule.Body { - indices.Update(rule, expr) - } - } - return false - }) - } - - // build trie out of indices. - for idx := range rules { - var prio int - WalkRules(rules[idx], func(rule *Rule) bool { - if rule.Default { - return false - } - node := i.root - if indices.Indexed(rule) { - for _, ref := range indices.Sorted() { - node = node.Insert(ref, indices.Value(rule, ref), indices.Mapper(rule, ref)) - } - } - // Insert rule into trie with (insertion order, priority order) - // tuple. Retaining the insertion order allows us to return rules - // in the order they were passed to this function. - node.append([...]int{idx, prio}, rule) - prio++ - return false - }) - } - return true -} - -func (i *baseDocEqIndex) Lookup(resolver ValueResolver) (*IndexResult, error) { - - tr := newTrieTraversalResult() - - err := i.root.Traverse(resolver, tr) - if err != nil { - return nil, err - } - - result := NewIndexResult(i.kind) - result.Default = i.defaultRule - result.OnlyGroundRefs = i.onlyGroundRefs - result.Rules = make([]*Rule, 0, len(tr.ordering)) - - for _, pos := range tr.ordering { - sort.Slice(tr.unordered[pos], func(i, j int) bool { - return tr.unordered[pos][i].prio[1] < tr.unordered[pos][j].prio[1] - }) - nodes := tr.unordered[pos] - root := nodes[0].rule - - result.Rules = append(result.Rules, root) - if len(nodes) > 1 { - result.Else[root] = make([]*Rule, len(nodes)-1) - for i := 1; i < len(nodes); i++ { - result.Else[root][i-1] = nodes[i].rule - } - } - } - - result.EarlyExit = tr.values.Len() == 1 && tr.values.Slice()[0].IsGround() - - return result, nil -} - -func (i *baseDocEqIndex) AllRules(_ ValueResolver) (*IndexResult, error) { - tr := newTrieTraversalResult() - - // Walk over the rule trie and accumulate _all_ rules - rw := &ruleWalker{result: tr} - i.root.Do(rw) - - result := NewIndexResult(i.kind) - result.Default = i.defaultRule - result.OnlyGroundRefs = i.onlyGroundRefs - result.Rules = make([]*Rule, 0, len(tr.ordering)) - - for _, pos := range tr.ordering { - sort.Slice(tr.unordered[pos], func(i, j int) bool { - return tr.unordered[pos][i].prio[1] < tr.unordered[pos][j].prio[1] - }) - nodes := tr.unordered[pos] - root := nodes[0].rule - result.Rules = append(result.Rules, root) - if len(nodes) > 1 { - result.Else[root] = make([]*Rule, len(nodes)-1) - for i := 1; i < len(nodes); i++ { - result.Else[root][i-1] = nodes[i].rule - } - } - } - - result.EarlyExit = tr.values.Len() == 1 && tr.values.Slice()[0].IsGround() - - return result, nil -} - -type ruleWalker struct { - result *trieTraversalResult -} - -func (r *ruleWalker) Do(x interface{}) trieWalker { - tn := x.(*trieNode) - r.result.Add(tn) - return r -} - -type valueMapper struct { - Key string - MapValue func(Value) Value -} - -type refindex struct { - Ref Ref - Value Value - Mapper *valueMapper -} - -type refindices struct { - isVirtual func(Ref) bool - rules map[*Rule][]*refindex - frequency *util.HashMap - sorted []Ref -} - -func newrefindices(isVirtual func(Ref) bool) *refindices { - return &refindices{ - isVirtual: isVirtual, - rules: map[*Rule][]*refindex{}, - frequency: util.NewHashMap(func(a, b util.T) bool { - r1, 
r2 := a.(Ref), b.(Ref) - return r1.Equal(r2) - }, func(x util.T) int { - return x.(Ref).Hash() - }), - } -} - -// Update attempts to update the refindices for the given expression in the -// given rule. If the expression cannot be indexed the update does not affect -// the indices. -func (i *refindices) Update(rule *Rule, expr *Expr) { - - if expr.Negated { - return - } - - if len(expr.With) > 0 { - // NOTE(tsandall): In the future, we may need to consider expressions - // that have with statements applied to them. - return - } - - op := expr.Operator() - - switch { - case op.Equal(Equality.Ref()): - i.updateEq(rule, expr) - - case op.Equal(Equal.Ref()) && len(expr.Operands()) == 2: - // NOTE(tsandall): if equal() is called with more than two arguments the - // output value is being captured in which case the indexer cannot - // exclude the rule if the equal() call would return false (because the - // false value must still be produced.) - i.updateEq(rule, expr) - - case op.Equal(GlobMatch.Ref()) && len(expr.Operands()) == 3: - // NOTE(sr): Same as with equal() above -- 4 operands means the output - // of `glob.match` is captured and the rule can thus not be excluded. - i.updateGlobMatch(rule, expr) - } -} - -// Sorted returns a sorted list of references that the indices were built from. -// References that appear more frequently in the indexed rules are ordered -// before less frequently appearing references. -func (i *refindices) Sorted() []Ref { - - if i.sorted == nil { - counts := make([]int, 0, i.frequency.Len()) - i.sorted = make([]Ref, 0, i.frequency.Len()) - - i.frequency.Iter(func(k, v util.T) bool { - counts = append(counts, v.(int)) - i.sorted = append(i.sorted, k.(Ref)) - return false - }) - - sort.Slice(i.sorted, func(a, b int) bool { - if counts[a] > counts[b] { - return true - } else if counts[b] > counts[a] { - return false - } - return i.sorted[a][0].Loc().Compare(i.sorted[b][0].Loc()) < 0 - }) - } - - return i.sorted -} - -func (i *refindices) Indexed(rule *Rule) bool { - return len(i.rules[rule]) > 0 -} - -func (i *refindices) Value(rule *Rule, ref Ref) Value { - if index := i.index(rule, ref); index != nil { - return index.Value - } - return nil -} - -func (i *refindices) Mapper(rule *Rule, ref Ref) *valueMapper { - if index := i.index(rule, ref); index != nil { - return index.Mapper - } - return nil -} - -func (i *refindices) updateEq(rule *Rule, expr *Expr) { - a, b := expr.Operand(0), expr.Operand(1) - args := rule.Head.Args - if idx, ok := eqOperandsToRefAndValue(i.isVirtual, args, a, b); ok { - i.insert(rule, idx) - return - } - if idx, ok := eqOperandsToRefAndValue(i.isVirtual, args, b, a); ok { - i.insert(rule, idx) - return - } -} - -func (i *refindices) updateGlobMatch(rule *Rule, expr *Expr) { - args := rule.Head.Args - - delim, ok := globDelimiterToString(expr.Operand(1)) - if !ok { - return - } - - if arr := globPatternToArray(expr.Operand(0), delim); arr != nil { - // The 3rd operand of glob.match is the value to match. We assume the - // 3rd operand was a reference that has been rewritten and bound to a - // variable earlier in the query OR a function argument variable. 
- match := expr.Operand(2) - if _, ok := match.Value.(Var); ok { - var ref Ref - for _, other := range i.rules[rule] { - if _, ok := other.Value.(Var); ok && other.Value.Compare(match.Value) == 0 { - ref = other.Ref - } - } - if ref == nil { - for j, arg := range args { - if arg.Equal(match) { - ref = Ref{FunctionArgRootDocument, IntNumberTerm(j)} - } - } - } - if ref != nil { - i.insert(rule, &refindex{ - Ref: ref, - Value: arr.Value, - Mapper: &valueMapper{ - Key: delim, - MapValue: func(v Value) Value { - if s, ok := v.(String); ok { - return stringSliceToArray(splitStringEscaped(string(s), delim)) - } - return v - }, - }, - }) - } - } - } -} - -func (i *refindices) insert(rule *Rule, index *refindex) { - - count, ok := i.frequency.Get(index.Ref) - if !ok { - count = 0 - } - - i.frequency.Put(index.Ref, count.(int)+1) - - for pos, other := range i.rules[rule] { - if other.Ref.Equal(index.Ref) { - i.rules[rule][pos] = index - return - } - } - - i.rules[rule] = append(i.rules[rule], index) -} - -func (i *refindices) index(rule *Rule, ref Ref) *refindex { - for _, index := range i.rules[rule] { - if index.Ref.Equal(ref) { - return index - } - } - return nil -} - -type trieWalker interface { - Do(x interface{}) trieWalker -} - -type trieTraversalResult struct { - unordered map[int][]*ruleNode - ordering []int - values Set -} - -func newTrieTraversalResult() *trieTraversalResult { - return &trieTraversalResult{ - unordered: map[int][]*ruleNode{}, - values: NewSet(), - } -} - -func (tr *trieTraversalResult) Add(t *trieNode) { - for _, node := range t.rules { - root := node.prio[0] - nodes, ok := tr.unordered[root] - if !ok { - tr.ordering = append(tr.ordering, root) - } - tr.unordered[root] = append(nodes, node) - } - if t.values != nil { - t.values.Foreach(func(v *Term) { tr.values.Add(v) }) - } -} - -type trieNode struct { - ref Ref - values Set - mappers []*valueMapper - next *trieNode - any *trieNode - undefined *trieNode - scalars *util.HashMap - array *trieNode - rules []*ruleNode -} - -func (node *trieNode) String() string { - var flags []string - flags = append(flags, fmt.Sprintf("self:%p", node)) - if len(node.ref) > 0 { - flags = append(flags, node.ref.String()) - } - if node.next != nil { - flags = append(flags, fmt.Sprintf("next:%p", node.next)) - } - if node.any != nil { - flags = append(flags, fmt.Sprintf("any:%p", node.any)) - } - if node.undefined != nil { - flags = append(flags, fmt.Sprintf("undefined:%p", node.undefined)) - } - if node.array != nil { - flags = append(flags, fmt.Sprintf("array:%p", node.array)) - } - if node.scalars.Len() > 0 { - buf := make([]string, 0, node.scalars.Len()) - node.scalars.Iter(func(k, v util.T) bool { - key := k.(Value) - val := v.(*trieNode) - buf = append(buf, fmt.Sprintf("scalar(%v):%p", key, val)) - return false - }) - sort.Strings(buf) - flags = append(flags, strings.Join(buf, " ")) - } - if len(node.rules) > 0 { - flags = append(flags, fmt.Sprintf("%d rule(s)", len(node.rules))) - } - if len(node.mappers) > 0 { - flags = append(flags, fmt.Sprintf("%d mapper(s)", len(node.mappers))) - } - if node.values != nil { - if l := node.values.Len(); l > 0 { - flags = append(flags, fmt.Sprintf("%d value(s)", l)) - } - } - return strings.Join(flags, " ") -} - -func (node *trieNode) append(prio [2]int, rule *Rule) { - node.rules = append(node.rules, &ruleNode{prio, rule}) - - if node.values != nil && rule.Head.Value != nil { - node.values.Add(rule.Head.Value) - return - } - - if node.values == nil && rule.Head.DocKind() == CompleteDoc { - 
node.values = NewSet(rule.Head.Value) - } -} - -type ruleNode struct { - prio [2]int - rule *Rule -} - -func newTrieNodeImpl() *trieNode { - return &trieNode{ - scalars: util.NewHashMap(valueEq, valueHash), - } -} - -func (node *trieNode) Do(walker trieWalker) { - next := walker.Do(node) - if next == nil { - return - } - if node.any != nil { - node.any.Do(next) - } - if node.undefined != nil { - node.undefined.Do(next) - } - - node.scalars.Iter(func(_, v util.T) bool { - child := v.(*trieNode) - child.Do(next) - return false - }) - - if node.array != nil { - node.array.Do(next) - } - if node.next != nil { - node.next.Do(next) - } -} - -func (node *trieNode) Insert(ref Ref, value Value, mapper *valueMapper) *trieNode { - - if node.next == nil { - node.next = newTrieNodeImpl() - node.next.ref = ref - } - - if mapper != nil { - node.next.addMapper(mapper) - } - - return node.next.insertValue(value) -} - -func (node *trieNode) Traverse(resolver ValueResolver, tr *trieTraversalResult) error { - - if node == nil { - return nil - } - - tr.Add(node) - - return node.next.traverse(resolver, tr) -} - -func (node *trieNode) addMapper(mapper *valueMapper) { - for i := range node.mappers { - if node.mappers[i].Key == mapper.Key { - return - } - } - node.mappers = append(node.mappers, mapper) -} - -func (node *trieNode) insertValue(value Value) *trieNode { - - switch value := value.(type) { - case nil: - if node.undefined == nil { - node.undefined = newTrieNodeImpl() - } - return node.undefined - case Var: - if node.any == nil { - node.any = newTrieNodeImpl() - } - return node.any - case Null, Boolean, Number, String: - child, ok := node.scalars.Get(value) - if !ok { - child = newTrieNodeImpl() - node.scalars.Put(value, child) - } - return child.(*trieNode) - case *Array: - if node.array == nil { - node.array = newTrieNodeImpl() - } - return node.array.insertArray(value) - } - - panic("illegal value") -} - -func (node *trieNode) insertArray(arr *Array) *trieNode { - - if arr.Len() == 0 { - return node - } - - switch head := arr.Elem(0).Value.(type) { - case Var: - if node.any == nil { - node.any = newTrieNodeImpl() - } - return node.any.insertArray(arr.Slice(1, -1)) - case Null, Boolean, Number, String: - child, ok := node.scalars.Get(head) - if !ok { - child = newTrieNodeImpl() - node.scalars.Put(head, child) - } - return child.(*trieNode).insertArray(arr.Slice(1, -1)) - } - - panic("illegal value") -} - -func (node *trieNode) traverse(resolver ValueResolver, tr *trieTraversalResult) error { - - if node == nil { - return nil - } - - v, err := resolver.Resolve(node.ref) - if err != nil { - if IsUnknownValueErr(err) { - return node.traverseUnknown(resolver, tr) - } - return err - } - - if node.undefined != nil { - err = node.undefined.Traverse(resolver, tr) - if err != nil { - return err - } - } - - if v == nil { - return nil - } - - if node.any != nil { - err = node.any.Traverse(resolver, tr) - if err != nil { - return err - } - } - - if err := node.traverseValue(resolver, tr, v); err != nil { - return err - } - - for i := range node.mappers { - if err := node.traverseValue(resolver, tr, node.mappers[i].MapValue(v)); err != nil { - return err - } - } - - return nil -} - -func (node *trieNode) traverseValue(resolver ValueResolver, tr *trieTraversalResult, value Value) error { - - switch value := value.(type) { - case *Array: - if node.array == nil { - return nil - } - return node.array.traverseArray(resolver, tr, value) - - case Null, Boolean, Number, String: - child, ok := node.scalars.Get(value) - if 
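// Toy sketch of the trie machinery above, using hypothetical standalone
// types: children are keyed by scalar value, and a wildcard child (like
// node.any for Var operands in insertValue) matches anything. OPA's real
// trieNode additionally tracks arrays, undefined refs, and value mappers.
package main

import "fmt"

type node struct {
	scalars map[string]*node
	any     *node
	rules   []string
}

func newNode() *node { return &node{scalars: map[string]*node{}} }

func (n *node) insert(val string) *node {
	if val == "*" { // wildcard, analogous to a Var operand
		if n.any == nil {
			n.any = newNode()
		}
		return n.any
	}
	child, ok := n.scalars[val]
	if !ok {
		child = newNode()
		n.scalars[val] = child
	}
	return child
}

// traverse visits the wildcard child alongside the exact match, mirroring
// how Traverse walks node.any in addition to the matching scalar child.
func (n *node) traverse(val string, out *[]string) {
	if n.any != nil {
		*out = append(*out, n.any.rules...)
	}
	if child, ok := n.scalars[val]; ok {
		*out = append(*out, child.rules...)
	}
}

func main() {
	root := newNode()
	root.insert("alice").rules = []string{"rule-1"} // input.user == "alice"
	root.insert("*").rules = []string{"rule-2"}     // input.user == x (any)
	var matched []string
	root.traverse("alice", &matched)
	fmt.Println(matched) // [rule-2 rule-1]
}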
!ok { - return nil - } - return child.(*trieNode).Traverse(resolver, tr) - } - - return nil -} - -func (node *trieNode) traverseArray(resolver ValueResolver, tr *trieTraversalResult, arr *Array) error { - - if arr.Len() == 0 { - return node.Traverse(resolver, tr) - } - - if node.any != nil { - err := node.any.traverseArray(resolver, tr, arr.Slice(1, -1)) - if err != nil { - return err - } - } - - head := arr.Elem(0).Value - - if !IsScalar(head) { - return nil - } - - child, ok := node.scalars.Get(head) - if !ok { - return nil - } - return child.(*trieNode).traverseArray(resolver, tr, arr.Slice(1, -1)) -} - -func (node *trieNode) traverseUnknown(resolver ValueResolver, tr *trieTraversalResult) error { - - if node == nil { - return nil - } - - if err := node.Traverse(resolver, tr); err != nil { - return err - } - - if err := node.undefined.traverseUnknown(resolver, tr); err != nil { - return err - } - - if err := node.any.traverseUnknown(resolver, tr); err != nil { - return err - } - - if err := node.array.traverseUnknown(resolver, tr); err != nil { - return err - } - - var iterErr error - node.scalars.Iter(func(_, v util.T) bool { - child := v.(*trieNode) - if iterErr = child.traverseUnknown(resolver, tr); iterErr != nil { - return true - } - return false - }) - - return iterErr -} - -// If term `a` is one of the function's operands, we store a Ref: `args[0]` -// for the argument number. So for `f(x, y) { x = 10; y = 12 }`, we'll -// bind `args[0]` and `args[1]` to this rule when called for (x=10) and -// (y=12) respectively. -func eqOperandsToRefAndValue(isVirtual func(Ref) bool, args []*Term, a, b *Term) (*refindex, bool) { - switch v := a.Value.(type) { - case Var: - for i, arg := range args { - if arg.Value.Compare(v) == 0 { - if bval, ok := indexValue(b); ok { - return &refindex{Ref: Ref{FunctionArgRootDocument, IntNumberTerm(i)}, Value: bval}, true - } - } - } - case Ref: - if !RootDocumentNames.Contains(v[0]) { - return nil, false - } - if isVirtual(v) { - return nil, false - } - if v.IsNested() || !v.IsGround() { - return nil, false - } - if bval, ok := indexValue(b); ok { - return &refindex{Ref: v, Value: bval}, true - } - } - return nil, false -} - -func indexValue(b *Term) (Value, bool) { - switch b := b.Value.(type) { - case Null, Boolean, Number, String, Var: - return b, true - case *Array: - stop := false - first := true - vis := NewGenericVisitor(func(x interface{}) bool { - if first { - first = false - return false - } - switch x.(type) { - // No nested structures or values that require evaluation (other than var). - case *Array, Object, Set, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Ref: - stop = true - } - return stop - }) - vis.Walk(b) - if !stop { - return b, true - } - } - - return nil, false -} - -func globDelimiterToString(delim *Term) (string, bool) { - - arr, ok := delim.Value.(*Array) - if !ok { - return "", false - } - - var result string - - if arr.Len() == 0 { - result = "." 
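// Hedged sketch of the args-binding convention documented above, with a
// hypothetical stand-in for refindex: when a rule body equates a function
// argument with an indexable value, the indexer records a synthetic ref
// args[i] for that argument position.
package main

import "fmt"

type refindex struct {
	Ref   string
	Value interface{}
}

func bindArgs(argNames []string, eqs map[string]interface{}) []refindex {
	var out []refindex
	for i, name := range argNames {
		if v, ok := eqs[name]; ok {
			out = append(out, refindex{Ref: fmt.Sprintf("args[%d]", i), Value: v})
		}
	}
	return out
}

func main() {
	// f(x, y) { x = 10; y = 12 }
	fmt.Println(bindArgs([]string{"x", "y"}, map[string]interface{}{"x": 10, "y": 12}))
	// [{args[0] 10} {args[1] 12}]
}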
- } else { - for i := 0; i < arr.Len(); i++ { - term := arr.Elem(i) - s, ok := term.Value.(String) - if !ok { - return "", false - } - result += string(s) - } - } - - return result, true -} - -func globPatternToArray(pattern *Term, delim string) *Term { - - s, ok := pattern.Value.(String) - if !ok { - return nil - } - - parts := splitStringEscaped(string(s), delim) - arr := make([]*Term, len(parts)) - - for i := range parts { - if parts[i] == "*" { - arr[i] = VarTerm("$globwildcard") - } else { - var escaped bool - for _, c := range parts[i] { - if c == '\\' { - escaped = !escaped - continue - } - if !escaped { - switch c { - case '[', '?', '{', '*': - // TODO(tsandall): super glob and character pattern - // matching not supported yet. - return nil - } - } - escaped = false - } - arr[i] = StringTerm(parts[i]) - } - } - - return NewTerm(NewArray(arr...)) -} - -// splits s on characters in delim except if delim characters have been escaped -// with reverse solidus. -func splitStringEscaped(s string, delim string) []string { - - var last, curr int - var escaped bool - var result []string - - for ; curr < len(s); curr++ { - if s[curr] == '\\' || escaped { - escaped = !escaped - continue - } - if strings.ContainsRune(delim, rune(s[curr])) { - result = append(result, s[last:curr]) - last = curr + 1 - } - } - - result = append(result, s[last:]) - - return result -} - -func stringSliceToArray(s []string) *Array { - arr := make([]*Term, len(s)) - for i, v := range s { - arr[i] = StringTerm(v) - } - return NewArray(arr...) + return v1.NewIndexResult(kind) } diff --git a/vendor/github.com/open-policy-agent/opa/ast/interning.go b/vendor/github.com/open-policy-agent/opa/ast/interning.go new file mode 100644 index 00000000000..239293664be --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/ast/interning.go @@ -0,0 +1,24 @@ +// Copyright 2024 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + v1 "github.com/open-policy-agent/opa/v1/ast" +) + +func InternedBooleanTerm(b bool) *Term { + return v1.InternedBooleanTerm(b) +} + +// InternedIntNumberTerm returns a term with the given integer value. The term is +// cached between -1 to 512, and for values outside of that range, this function +// is equivalent to ast.IntNumberTerm. +func InternedIntNumberTerm(i int) *Term { + return v1.InternedIntNumberTerm(i) +} + +func HasInternedIntNumberTerm(i int) bool { + return v1.HasInternedIntNumberTerm(i) +} diff --git a/vendor/github.com/open-policy-agent/opa/ast/json/doc.go b/vendor/github.com/open-policy-agent/opa/ast/json/doc.go new file mode 100644 index 00000000000..26aee9b9940 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/ast/json/doc.go @@ -0,0 +1,8 @@ +// Copyright 2024 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. 
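// Runnable restatement of splitStringEscaped above (reimplemented here so
// the example is self-contained): split on any delimiter character unless
// it is escaped with a reverse solidus.
package main

import (
	"fmt"
	"strings"
)

func splitEscaped(s, delim string) []string {
	var result []string
	var last int
	escaped := false
	for curr := 0; curr < len(s); curr++ {
		if s[curr] == '\\' || escaped {
			escaped = !escaped
			continue
		}
		if strings.ContainsRune(delim, rune(s[curr])) {
			result = append(result, s[last:curr])
			last = curr + 1
		}
	}
	return append(result, s[last:])
}

func main() {
	fmt.Println(splitEscaped("a.b.c", "."))  // [a b c]
	fmt.Println(splitEscaped(`a\.b.c`, ".")) // [a\.b c]: escaped dot is not split
}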
+package json
diff --git a/vendor/github.com/open-policy-agent/opa/ast/json/json.go b/vendor/github.com/open-policy-agent/opa/ast/json/json.go
index 565017d58e3..8a3a36bb9b1 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/json/json.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/json/json.go
@@ -1,36 +1,15 @@
 package json
 
+import v1 "github.com/open-policy-agent/opa/v1/ast/json"
+
 // Options defines the options for JSON operations,
 // currently only marshaling can be configured
-type Options struct {
-    MarshalOptions MarshalOptions
-}
+type Options = v1.Options
 
 // MarshalOptions defines the options for JSON marshaling,
 // currently only toggling the marshaling of location information is supported
-type MarshalOptions struct {
-    // IncludeLocation toggles the marshaling of location information
-    IncludeLocation NodeToggle
-    // IncludeLocationText additionally/optionally includes the text of the location
-    IncludeLocationText bool
-    // ExcludeLocationFile additionally/optionally excludes the file of the location
-    // Note that this is inverted (i.e. not "include" as the default needs to remain false)
-    ExcludeLocationFile bool
-}
+type MarshalOptions = v1.MarshalOptions
 
 // NodeToggle is a generic struct to allow the toggling of
 // settings for different ast node types
-type NodeToggle struct {
-    Term           bool
-    Package        bool
-    Comment        bool
-    Import         bool
-    Rule           bool
-    Head           bool
-    Expr           bool
-    SomeDecl       bool
-    Every          bool
-    With           bool
-    Annotations    bool
-    AnnotationsRef bool
-}
+type NodeToggle = v1.NodeToggle
diff --git a/vendor/github.com/open-policy-agent/opa/ast/map.go b/vendor/github.com/open-policy-agent/opa/ast/map.go
index b0cc9eb60f2..070ad3e5ded 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/map.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/map.go
@@ -5,129 +5,14 @@
 package ast
 
 import (
-    "encoding/json"
-
-    "github.com/open-policy-agent/opa/util"
+    v1 "github.com/open-policy-agent/opa/v1/ast"
 )
 
 // ValueMap represents a key/value map between AST term values. Any type of term
 // can be used as a key in the map.
-type ValueMap struct {
-    hashMap *util.HashMap
-}
+type ValueMap = v1.ValueMap
 
 // NewValueMap returns a new ValueMap.
 func NewValueMap() *ValueMap {
-    vs := &ValueMap{
-        hashMap: util.NewHashMap(valueEq, valueHash),
-    }
-    return vs
-}
-
-// MarshalJSON provides a custom marshaller for the ValueMap which
-// will include the key, value, and value type.
-func (vs *ValueMap) MarshalJSON() ([]byte, error) {
-    var tmp []map[string]interface{}
-    vs.Iter(func(k Value, v Value) bool {
-        tmp = append(tmp, map[string]interface{}{
-            "name":  k.String(),
-            "type":  TypeName(v),
-            "value": v,
-        })
-        return false
-    })
-    return json.Marshal(tmp)
-}
-
-// Copy returns a shallow copy of the ValueMap.
-func (vs *ValueMap) Copy() *ValueMap {
-    if vs == nil {
-        return nil
-    }
-    cpy := NewValueMap()
-    cpy.hashMap = vs.hashMap.Copy()
-    return cpy
-}
-
-// Equal returns true if this ValueMap equals the other.
-func (vs *ValueMap) Equal(other *ValueMap) bool {
-    if vs == nil {
-        return other == nil || other.Len() == 0
-    }
-    if other == nil {
-        return vs == nil || vs.Len() == 0
-    }
-    return vs.hashMap.Equal(other.hashMap)
-}
-
-// Len returns the number of elements in the map.
-func (vs *ValueMap) Len() int {
-    if vs == nil {
-        return 0
-    }
-    return vs.hashMap.Len()
-}
-
-// Get returns the value in the map for k.
-func (vs *ValueMap) Get(k Value) Value { - if vs != nil { - if v, ok := vs.hashMap.Get(k); ok { - return v.(Value) - } - } - return nil -} - -// Hash returns a hash code for this ValueMap. -func (vs *ValueMap) Hash() int { - if vs == nil { - return 0 - } - return vs.hashMap.Hash() -} - -// Iter calls the iter function for each key/value pair in the map. If the iter -// function returns true, iteration stops. -func (vs *ValueMap) Iter(iter func(Value, Value) bool) bool { - if vs == nil { - return false - } - return vs.hashMap.Iter(func(kt, vt util.T) bool { - k := kt.(Value) - v := vt.(Value) - return iter(k, v) - }) -} - -// Put inserts a key k into the map with value v. -func (vs *ValueMap) Put(k, v Value) { - if vs == nil { - panic("put on nil value map") - } - vs.hashMap.Put(k, v) -} - -// Delete removes a key k from the map. -func (vs *ValueMap) Delete(k Value) { - if vs == nil { - return - } - vs.hashMap.Delete(k) -} - -func (vs *ValueMap) String() string { - if vs == nil { - return "{}" - } - return vs.hashMap.String() -} - -func valueHash(v util.T) int { - return v.(Value).Hash() -} - -func valueEq(a, b util.T) bool { - av := a.(Value) - bv := b.(Value) - return av.Compare(bv) == 0 + return v1.NewValueMap() } diff --git a/vendor/github.com/open-policy-agent/opa/ast/marshal.go b/vendor/github.com/open-policy-agent/opa/ast/marshal.go deleted file mode 100644 index 53fb1120443..00000000000 --- a/vendor/github.com/open-policy-agent/opa/ast/marshal.go +++ /dev/null @@ -1,11 +0,0 @@ -package ast - -import ( - astJSON "github.com/open-policy-agent/opa/ast/json" -) - -// customJSON is an interface that can be implemented by AST nodes that -// allows the parser to set options for JSON operations on that node. -type customJSON interface { - setJSONOptions(astJSON.Options) -} diff --git a/vendor/github.com/open-policy-agent/opa/ast/parser.go b/vendor/github.com/open-policy-agent/opa/ast/parser.go index 09ede2baecd..45cd4da06ef 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/parser.go +++ b/vendor/github.com/open-policy-agent/opa/ast/parser.go @@ -1,2733 +1,49 @@ -// Copyright 2020 The OPA Authors. All rights reserved. +// Copyright 2024 The OPA Authors. All rights reserved. // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. package ast import ( - "bytes" - "encoding/json" - "fmt" - "io" - "math/big" - "net/url" - "regexp" - "sort" - "strconv" - "strings" - "unicode/utf8" - - "gopkg.in/yaml.v3" - - "github.com/open-policy-agent/opa/ast/internal/scanner" - "github.com/open-policy-agent/opa/ast/internal/tokens" - astJSON "github.com/open-policy-agent/opa/ast/json" - "github.com/open-policy-agent/opa/ast/location" + v1 "github.com/open-policy-agent/opa/v1/ast" ) -var RegoV1CompatibleRef = Ref{VarTerm("rego"), StringTerm("v1")} +var RegoV1CompatibleRef = v1.RegoV1CompatibleRef // RegoVersion defines the Rego syntax requirements for a module. -type RegoVersion int +type RegoVersion = v1.RegoVersion -const DefaultRegoVersion = RegoVersion(0) +const DefaultRegoVersion = RegoV0 const ( + RegoUndefined = v1.RegoUndefined // RegoV0 is the default, original Rego syntax. - RegoV0 RegoVersion = iota + RegoV0 = v1.RegoV0 // RegoV0CompatV1 requires modules to comply with both the RegoV0 and RegoV1 syntax (as when 'rego.v1' is imported in a module). // Shortly, RegoV1 compatibility is required, but 'rego.v1' or 'future.keywords' must also be imported. 
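// The pattern throughout this bump: the v0 package keeps its names but
// defines them as type aliases of the v1 package. A short sketch (assuming
// both import paths are vendored, as this update does) showing that aliased
// values flow between the old and new paths with no conversion:
package main

import (
	"fmt"

	astjson "github.com/open-policy-agent/opa/ast/json"
	v1json "github.com/open-policy-agent/opa/v1/ast/json"
)

func main() {
	opts := astjson.Options{
		MarshalOptions: astjson.MarshalOptions{IncludeLocationText: true},
	}
	var migrated v1json.Options = opts // legal: the alias makes the types identical
	fmt.Printf("%+v\n", migrated)
}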
- RegoV0CompatV1 + RegoV0CompatV1 = v1.RegoV0CompatV1 // RegoV1 is the Rego syntax enforced by OPA 1.0; e.g.: // future.keywords part of default keyword set, and don't require imports; // 'if' and 'contains' required in rule heads; // (some) strict checks on by default. - RegoV1 + RegoV1 = v1.RegoV1 ) -func (v RegoVersion) Int() int { - if v == RegoV1 { - return 1 - } - return 0 -} - -func (v RegoVersion) String() string { - switch v { - case RegoV0: - return "v0" - case RegoV1: - return "v1" - case RegoV0CompatV1: - return "v0v1" - default: - return "unknown" - } -} - func RegoVersionFromInt(i int) RegoVersion { - if i == 1 { - return RegoV1 - } - return RegoV0 -} - -// Note: This state is kept isolated from the parser so that we -// can do efficient shallow copies of these values when doing a -// save() and restore(). -type state struct { - s *scanner.Scanner - lastEnd int - skippedNL bool - tok tokens.Token - tokEnd int - lit string - loc Location - errors Errors - hints []string - comments []*Comment - wildcard int -} - -func (s *state) String() string { - return fmt.Sprintf("", s.s, s.tok, s.lit, s.loc, len(s.errors), len(s.comments)) -} - -func (s *state) Loc() *location.Location { - cpy := s.loc - return &cpy -} - -func (s *state) Text(offset, end int) []byte { - bs := s.s.Bytes() - if offset >= 0 && offset < len(bs) { - if end >= offset && end <= len(bs) { - return bs[offset:end] - } - } - return nil + return v1.RegoVersionFromInt(i) } // Parser is used to parse Rego statements. -type Parser struct { - r io.Reader - s *state - po ParserOptions - cache parsedTermCache -} - -type parsedTermCacheItem struct { - t *Term - post *state // post is the post-state that's restored on a cache-hit - offset int - next *parsedTermCacheItem -} - -type parsedTermCache struct { - m *parsedTermCacheItem -} - -func (c parsedTermCache) String() string { - s := strings.Builder{} - s.WriteRune('{') - var e *parsedTermCacheItem - for e = c.m; e != nil; e = e.next { - s.WriteString(fmt.Sprintf("%v", e)) - } - s.WriteRune('}') - return s.String() -} - -func (e *parsedTermCacheItem) String() string { - return fmt.Sprintf("<%d:%v>", e.offset, e.t) -} +type Parser = v1.Parser // ParserOptions defines the options for parsing Rego statements. -type ParserOptions struct { - Capabilities *Capabilities - ProcessAnnotation bool - AllFutureKeywords bool - FutureKeywords []string - SkipRules bool - JSONOptions *astJSON.Options - // RegoVersion is the version of Rego to parse for. - RegoVersion RegoVersion - unreleasedKeywords bool // TODO(sr): cleanup -} - -// EffectiveRegoVersion returns the effective RegoVersion to use for parsing. -// Deprecated: Use RegoVersion instead. -func (po *ParserOptions) EffectiveRegoVersion() RegoVersion { - return po.RegoVersion -} +type ParserOptions = v1.ParserOptions // NewParser creates and initializes a Parser. func NewParser() *Parser { - p := &Parser{ - s: &state{}, - po: ParserOptions{}, - } - return p -} - -// WithFilename provides the filename for Location details -// on parsed statements. -func (p *Parser) WithFilename(filename string) *Parser { - p.s.loc.File = filename - return p -} - -// WithReader provides the io.Reader that the parser will -// use as its source. 
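// DefaultRegoVersion stays RegoV0 in this compatibility package, so existing
// v0 modules keep parsing unchanged and OPA 1.0 syntax is opt-in. A sketch,
// assuming the shim keeps ParseModuleWithOpts (as parser_ext.go does in this
// bump):
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	opts := ast.ParserOptions{RegoVersion: ast.RegoV1}
	module, err := ast.ParseModuleWithOpts("example.rego", `package example

allow if input.user == "alice"
`, opts)
	fmt.Println(module != nil, err) // true <nil>
}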
-func (p *Parser) WithReader(r io.Reader) *Parser { - p.r = r - return p -} - -// WithProcessAnnotation enables or disables the processing of -// annotations by the Parser -func (p *Parser) WithProcessAnnotation(processAnnotation bool) *Parser { - p.po.ProcessAnnotation = processAnnotation - return p -} - -// WithFutureKeywords enables "future" keywords, i.e., keywords that can -// be imported via -// -// import future.keywords.kw -// import future.keywords.other -// -// but in a more direct way. The equivalent of this import would be -// -// WithFutureKeywords("kw", "other") -func (p *Parser) WithFutureKeywords(kws ...string) *Parser { - p.po.FutureKeywords = kws - return p -} - -// WithAllFutureKeywords enables all "future" keywords, i.e., the -// ParserOption equivalent of -// -// import future.keywords -func (p *Parser) WithAllFutureKeywords(yes bool) *Parser { - p.po.AllFutureKeywords = yes - return p -} - -// withUnreleasedKeywords allows using keywords that haven't surfaced -// as future keywords (see above) yet, but have tests that require -// them to be parsed -func (p *Parser) withUnreleasedKeywords(yes bool) *Parser { - p.po.unreleasedKeywords = yes - return p -} - -// WithCapabilities sets the capabilities structure on the parser. -func (p *Parser) WithCapabilities(c *Capabilities) *Parser { - p.po.Capabilities = c - return p -} - -// WithSkipRules instructs the parser not to attempt to parse Rule statements. -func (p *Parser) WithSkipRules(skip bool) *Parser { - p.po.SkipRules = skip - return p -} - -// WithJSONOptions sets the Options which will be set on nodes to configure -// their JSON marshaling behavior. -func (p *Parser) WithJSONOptions(jsonOptions *astJSON.Options) *Parser { - p.po.JSONOptions = jsonOptions - return p -} - -func (p *Parser) WithRegoVersion(version RegoVersion) *Parser { - p.po.RegoVersion = version - return p -} - -func (p *Parser) parsedTermCacheLookup() (*Term, *state) { - l := p.s.loc.Offset - // stop comparing once the cached offsets are lower than l - for h := p.cache.m; h != nil && h.offset >= l; h = h.next { - if h.offset == l { - return h.t, h.post - } - } - return nil, nil -} - -func (p *Parser) parsedTermCachePush(t *Term, s0 *state) { - s1 := p.save() - o0 := s0.loc.Offset - entry := parsedTermCacheItem{t: t, post: s1, offset: o0} - - // find the first one whose offset is smaller than ours - var e *parsedTermCacheItem - for e = p.cache.m; e != nil; e = e.next { - if e.offset < o0 { - break - } - } - entry.next = e - p.cache.m = &entry -} - -// futureParser returns a shallow copy of `p` with an empty -// cache, and a scanner that knows all future keywords. -// It's used to present hints in errors, when statements would -// only parse successfully if some future keyword is enabled. -func (p *Parser) futureParser() *Parser { - q := *p - q.s = p.save() - q.s.s = p.s.s.WithKeywords(futureKeywords) - q.cache = parsedTermCache{} - return &q -} - -// presentParser returns a shallow copy of `p` with an empty -// cache, and a scanner that knows none of the future keywords. -// It is used to successfully parse keyword imports, like -// -// import future.keywords.in -// -// even when the parser has already been informed about the -// future keyword "in". This parser won't error out because -// "in" is an identifier. 
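// Minimal use of the builder methods above: enabling the `in` keyword
// without an explicit `import future.keywords.in` in the source.
package main

import (
	"bytes"
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	src := `x := ["a", "b"]; "a" in x`
	parser := ast.NewParser().
		WithReader(bytes.NewBufferString(src)).
		WithFutureKeywords("in")
	stmts, _, errs := parser.Parse()
	fmt.Println(len(stmts), len(errs)) // 1 0
}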
-func (p *Parser) presentParser() (*Parser, map[string]tokens.Token) { - var cpy map[string]tokens.Token - q := *p - q.s = p.save() - q.s.s, cpy = p.s.s.WithoutKeywords(futureKeywords) - q.cache = parsedTermCache{} - return &q, cpy -} - -// Parse will read the Rego source and parse statements and -// comments as they are found. Any errors encountered while -// parsing will be accumulated and returned as a list of Errors. -func (p *Parser) Parse() ([]Statement, []*Comment, Errors) { - - if p.po.Capabilities == nil { - p.po.Capabilities = CapabilitiesForThisVersion() - } - - allowedFutureKeywords := map[string]tokens.Token{} - - if p.po.RegoVersion == RegoV1 { - // RegoV1 includes all future keywords in the default language definition - for k, v := range futureKeywords { - allowedFutureKeywords[k] = v - } - - // For sake of error reporting, we still need to check that keywords in capabilities are known, - for _, kw := range p.po.Capabilities.FutureKeywords { - if _, ok := futureKeywords[kw]; !ok { - return nil, nil, Errors{ - &Error{ - Code: ParseErr, - Message: fmt.Sprintf("illegal capabilities: unknown keyword: %v", kw), - Location: nil, - }, - } - } - } - // and that explicitly requested future keywords are known. - for _, kw := range p.po.FutureKeywords { - if _, ok := allowedFutureKeywords[kw]; !ok { - return nil, nil, Errors{ - &Error{ - Code: ParseErr, - Message: fmt.Sprintf("unknown future keyword: %v", kw), - Location: nil, - }, - } - } - } - } else { - for _, kw := range p.po.Capabilities.FutureKeywords { - var ok bool - allowedFutureKeywords[kw], ok = futureKeywords[kw] - if !ok { - return nil, nil, Errors{ - &Error{ - Code: ParseErr, - Message: fmt.Sprintf("illegal capabilities: unknown keyword: %v", kw), - Location: nil, - }, - } - } - } - } - - var err error - p.s.s, err = scanner.New(p.r) - if err != nil { - return nil, nil, Errors{ - &Error{ - Code: ParseErr, - Message: err.Error(), - Location: nil, - }, - } - } - - selected := map[string]tokens.Token{} - if p.po.AllFutureKeywords || p.po.RegoVersion == RegoV1 { - for kw, tok := range allowedFutureKeywords { - selected[kw] = tok - } - } else { - for _, kw := range p.po.FutureKeywords { - tok, ok := allowedFutureKeywords[kw] - if !ok { - return nil, nil, Errors{ - &Error{ - Code: ParseErr, - Message: fmt.Sprintf("unknown future keyword: %v", kw), - Location: nil, - }, - } - } - selected[kw] = tok - } - } - p.s.s = p.s.s.WithKeywords(selected) - - if p.po.RegoVersion == RegoV1 { - for kw, tok := range allowedFutureKeywords { - p.s.s.AddKeyword(kw, tok) - } - } - - // read the first token to initialize the parser - p.scan() - - var stmts []Statement - - // Read from the scanner until the last token is reached or no statements - // can be parsed. Attempt to parse package statements, import statements, - // rule statements, and then body/query statements (in that order). If a - // statement cannot be parsed, restore the parser state before trying the - // next type of statement. If a statement can be parsed, continue from that - // point trying to parse packages, imports, etc. in the same order. 
- for p.s.tok != tokens.EOF { - - s := p.save() - - if pkg := p.parsePackage(); pkg != nil { - stmts = append(stmts, pkg) - continue - } else if len(p.s.errors) > 0 { - break - } - - p.restore(s) - s = p.save() - - if imp := p.parseImport(); imp != nil { - if RegoRootDocument.Equal(imp.Path.Value.(Ref)[0]) { - p.regoV1Import(imp) - } - - if FutureRootDocument.Equal(imp.Path.Value.(Ref)[0]) { - p.futureImport(imp, allowedFutureKeywords) - } - - stmts = append(stmts, imp) - continue - } else if len(p.s.errors) > 0 { - break - } - - p.restore(s) - - if !p.po.SkipRules { - s = p.save() - - if rules := p.parseRules(); rules != nil { - for i := range rules { - stmts = append(stmts, rules[i]) - } - continue - } else if len(p.s.errors) > 0 { - break - } - - p.restore(s) - } - - if body := p.parseQuery(true, tokens.EOF); body != nil { - stmts = append(stmts, body) - continue - } - - break - } - - if p.po.ProcessAnnotation { - stmts = p.parseAnnotations(stmts) - } - - if p.po.JSONOptions != nil { - for i := range stmts { - vis := NewGenericVisitor(func(x interface{}) bool { - if x, ok := x.(customJSON); ok { - x.setJSONOptions(*p.po.JSONOptions) - } - return false - }) - - vis.Walk(stmts[i]) - } - } - - return stmts, p.s.comments, p.s.errors -} - -func (p *Parser) parseAnnotations(stmts []Statement) []Statement { - - annotStmts, errs := parseAnnotations(p.s.comments) - for _, err := range errs { - p.error(err.Location, err.Message) - } - - for _, annotStmt := range annotStmts { - stmts = append(stmts, annotStmt) - } - - return stmts -} - -func parseAnnotations(comments []*Comment) ([]*Annotations, Errors) { - - var hint = []byte("METADATA") - var curr *metadataParser - var blocks []*metadataParser - - for i := 0; i < len(comments); i++ { - if curr != nil { - if comments[i].Location.Row == comments[i-1].Location.Row+1 && comments[i].Location.Col == 1 { - curr.Append(comments[i]) - continue - } - curr = nil - } - if bytes.HasPrefix(bytes.TrimSpace(comments[i].Text), hint) { - curr = newMetadataParser(comments[i].Location) - blocks = append(blocks, curr) - } - } - - var stmts []*Annotations - var errs Errors - for _, b := range blocks { - a, err := b.Parse() - if err != nil { - errs = append(errs, &Error{ - Code: ParseErr, - Message: err.Error(), - Location: b.loc, - }) - } else { - stmts = append(stmts, a) - } - } - - return stmts, errs -} - -func (p *Parser) parsePackage() *Package { - - var pkg Package - pkg.SetLoc(p.s.Loc()) - - if p.s.tok != tokens.Package { - return nil - } - - p.scan() - if p.s.tok != tokens.Ident { - p.illegalToken() - return nil - } - - term := p.parseTerm() - - if term != nil { - switch v := term.Value.(type) { - case Var: - pkg.Path = Ref{ - DefaultRootDocument.Copy().SetLocation(term.Location), - StringTerm(string(v)).SetLocation(term.Location), - } - case Ref: - pkg.Path = make(Ref, len(v)+1) - pkg.Path[0] = DefaultRootDocument.Copy().SetLocation(v[0].Location) - first, ok := v[0].Value.(Var) - if !ok { - p.errorf(v[0].Location, "unexpected %v token: expecting var", TypeName(v[0].Value)) - return nil - } - pkg.Path[1] = StringTerm(string(first)).SetLocation(v[0].Location) - for i := 2; i < len(pkg.Path); i++ { - switch v[i-1].Value.(type) { - case String: - pkg.Path[i] = v[i-1] - default: - p.errorf(v[i-1].Location, "unexpected %v token: expecting string", TypeName(v[i-1].Value)) - return nil - } - } - default: - p.illegalToken() - return nil - } - } - - if pkg.Path == nil { - if len(p.s.errors) == 0 { - p.error(p.s.Loc(), "expected path") - } - return nil - } - - return 
&pkg -} - -func (p *Parser) parseImport() *Import { - - var imp Import - imp.SetLoc(p.s.Loc()) - - if p.s.tok != tokens.Import { - return nil - } - - p.scan() - if p.s.tok != tokens.Ident { - p.error(p.s.Loc(), "expected ident") - return nil - } - q, prev := p.presentParser() - term := q.parseTerm() - if term != nil { - switch v := term.Value.(type) { - case Var: - imp.Path = RefTerm(term).SetLocation(term.Location) - case Ref: - for i := 1; i < len(v); i++ { - if _, ok := v[i].Value.(String); !ok { - p.errorf(v[i].Location, "unexpected %v token: expecting string", TypeName(v[i].Value)) - return nil - } - } - imp.Path = term - } - } - // keep advanced parser state, reset known keywords - p.s = q.s - p.s.s = q.s.s.WithKeywords(prev) - - if imp.Path == nil { - p.error(p.s.Loc(), "expected path") - return nil - } - - path := imp.Path.Value.(Ref) - - switch { - case RootDocumentNames.Contains(path[0]): - case FutureRootDocument.Equal(path[0]): - case RegoRootDocument.Equal(path[0]): - default: - p.hint("if this is unexpected, try updating OPA") - p.errorf(imp.Path.Location, "unexpected import path, must begin with one of: %v, got: %v", - RootDocumentNames.Union(NewSet(FutureRootDocument, RegoRootDocument)), - path[0]) - return nil - } - - if p.s.tok == tokens.As { - p.scan() - - if p.s.tok != tokens.Ident { - p.illegal("expected var") - return nil - } - - if alias := p.parseTerm(); alias != nil { - v, ok := alias.Value.(Var) - if ok { - imp.Alias = v - return &imp - } - } - p.illegal("expected var") - return nil - } - - return &imp -} - -func (p *Parser) parseRules() []*Rule { - - var rule Rule - rule.SetLoc(p.s.Loc()) - - if p.s.tok == tokens.Default { - p.scan() - rule.Default = true - } - - if p.s.tok != tokens.Ident { - return nil - } - - usesContains := false - if rule.Head, usesContains = p.parseHead(rule.Default); rule.Head == nil { - return nil - } - - if usesContains { - rule.Head.keywords = append(rule.Head.keywords, tokens.Contains) - } - - if rule.Default { - if !p.validateDefaultRuleValue(&rule) { - return nil - } - - if len(rule.Head.Args) > 0 { - if !p.validateDefaultRuleArgs(&rule) { - return nil - } - } - - rule.Body = NewBody(NewExpr(BooleanTerm(true).SetLocation(rule.Location)).SetLocation(rule.Location)) - return []*Rule{&rule} - } - - // back-compat with `p[x] { ... }`` - hasIf := p.s.tok == tokens.If - - // p[x] if ... 
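// The loop above tries package, import, rule, then query at each position,
// so a file mixing statement kinds parses in a single pass. Sketch using the
// package-level helper:
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	stmts, _, err := ast.ParseStatements("example.rego", `package demo

import data.users

p { input.admin }
`)
	fmt.Println(len(stmts), err) // 3 <nil>
}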
becomes a single-value rule p[x] - if hasIf && !usesContains && len(rule.Head.Ref()) == 2 { - if !rule.Head.Ref()[1].IsGround() && len(rule.Head.Args) == 0 { - rule.Head.Key = rule.Head.Ref()[1] - } - - if rule.Head.Value == nil { - rule.Head.generatedValue = true - rule.Head.Value = BooleanTerm(true).SetLocation(rule.Head.Location) - } else { - // p[x] = y if becomes a single-value rule p[x] with value y, but needs name for compat - v, ok := rule.Head.Ref()[0].Value.(Var) - if !ok { - return nil - } - rule.Head.Name = v - } - } - - // p[x] becomes a multi-value rule p - if !hasIf && !usesContains && - len(rule.Head.Args) == 0 && // not a function - len(rule.Head.Ref()) == 2 { // ref like 'p[x]' - v, ok := rule.Head.Ref()[0].Value.(Var) - if !ok { - return nil - } - rule.Head.Name = v - rule.Head.Key = rule.Head.Ref()[1] - if rule.Head.Value == nil { - rule.Head.SetRef(rule.Head.Ref()[:len(rule.Head.Ref())-1]) - } - } - - switch { - case hasIf: - rule.Head.keywords = append(rule.Head.keywords, tokens.If) - p.scan() - s := p.save() - if expr := p.parseLiteral(); expr != nil { - // NOTE(sr): set literals are never false or undefined, so parsing this as - // p if { true } - // ^^^^^^^^ set of one element, `true` - // isn't valid. - isSetLiteral := false - if t, ok := expr.Terms.(*Term); ok { - _, isSetLiteral = t.Value.(Set) - } - // expr.Term is []*Term or Every - if !isSetLiteral { - rule.Body.Append(expr) - break - } - } - - // parsing as literal didn't work out, expect '{ BODY }' - p.restore(s) - fallthrough - - case p.s.tok == tokens.LBrace: - p.scan() - if rule.Body = p.parseBody(tokens.RBrace); rule.Body == nil { - return nil - } - p.scan() - - case usesContains: - rule.Body = NewBody(NewExpr(BooleanTerm(true).SetLocation(rule.Location)).SetLocation(rule.Location)) - rule.generatedBody = true - rule.Location = rule.Head.Location - - return []*Rule{&rule} - - default: - return nil - } - - if p.s.tok == tokens.Else { - if r := rule.Head.Ref(); len(r) > 1 && !r.IsGround() { - p.error(p.s.Loc(), "else keyword cannot be used on rules with variables in head") - return nil - } - if rule.Head.Key != nil { - p.error(p.s.Loc(), "else keyword cannot be used on multi-value rules") - return nil - } - - if rule.Else = p.parseElse(rule.Head); rule.Else == nil { - return nil - } - } - - rule.Location.Text = p.s.Text(rule.Location.Offset, p.s.lastEnd) - - rules := []*Rule{&rule} - - for p.s.tok == tokens.LBrace { - - if rule.Else != nil { - p.error(p.s.Loc(), "expected else keyword") - return nil - } - - loc := p.s.Loc() - - p.scan() - var next Rule - - if next.Body = p.parseBody(tokens.RBrace); next.Body == nil { - return nil - } - p.scan() - - loc.Text = p.s.Text(loc.Offset, p.s.lastEnd) - next.SetLoc(loc) - - // Chained rule head's keep the original - // rule's head AST but have their location - // set to the rule body. 
- next.Head = rule.Head.Copy() - next.Head.keywords = rule.Head.keywords - for i := range next.Head.Args { - if v, ok := next.Head.Args[i].Value.(Var); ok && v.IsWildcard() { - next.Head.Args[i].Value = Var(p.genwildcard()) - } - } - setLocRecursive(next.Head, loc) - - rules = append(rules, &next) - } - - return rules -} - -func (p *Parser) parseElse(head *Head) *Rule { - - var rule Rule - rule.SetLoc(p.s.Loc()) - - rule.Head = head.Copy() - rule.Head.generatedValue = false - for i := range rule.Head.Args { - if v, ok := rule.Head.Args[i].Value.(Var); ok && v.IsWildcard() { - rule.Head.Args[i].Value = Var(p.genwildcard()) - } - } - rule.Head.SetLoc(p.s.Loc()) - - defer func() { - rule.Location.Text = p.s.Text(rule.Location.Offset, p.s.lastEnd) - }() - - p.scan() - - switch p.s.tok { - case tokens.LBrace, tokens.If: // no value, but a body follows directly - rule.Head.generatedValue = true - rule.Head.Value = BooleanTerm(true) - case tokens.Assign, tokens.Unify: - rule.Head.Assign = tokens.Assign == p.s.tok - p.scan() - rule.Head.Value = p.parseTermInfixCall() - if rule.Head.Value == nil { - return nil - } - rule.Head.Location.Text = p.s.Text(rule.Head.Location.Offset, p.s.lastEnd) - default: - p.illegal("expected else value term or rule body") - return nil - } - - hasIf := p.s.tok == tokens.If - hasLBrace := p.s.tok == tokens.LBrace - - if !hasIf && !hasLBrace { - rule.Body = NewBody(NewExpr(BooleanTerm(true))) - rule.generatedBody = true - setLocRecursive(rule.Body, rule.Location) - return &rule - } - - if hasIf { - rule.Head.keywords = append(rule.Head.keywords, tokens.If) - p.scan() - } - - if p.s.tok == tokens.LBrace { - p.scan() - if rule.Body = p.parseBody(tokens.RBrace); rule.Body == nil { - return nil - } - p.scan() - } else if p.s.tok != tokens.EOF { - expr := p.parseLiteral() - if expr == nil { - return nil - } - rule.Body.Append(expr) - setLocRecursive(rule.Body, rule.Location) - } else { - p.illegal("rule body expected") - return nil - } - - if p.s.tok == tokens.Else { - if rule.Else = p.parseElse(head); rule.Else == nil { - return nil - } - } - return &rule -} - -func (p *Parser) parseHead(defaultRule bool) (*Head, bool) { - head := &Head{} - loc := p.s.Loc() - defer func() { - if head != nil { - head.SetLoc(loc) - head.Location.Text = p.s.Text(head.Location.Offset, p.s.lastEnd) - } - }() - - term := p.parseVar() - if term == nil { - return nil, false - } - - ref := p.parseTermFinish(term, true) - if ref == nil { - p.illegal("expected rule head name") - return nil, false - } - - switch x := ref.Value.(type) { - case Var: - // Modify the code to add the location to the head ref - // and set the head ref's jsonOptions. - head = VarHead(x, ref.Location, p.po.JSONOptions) - case Ref: - head = RefHead(x) - case Call: - op, args := x[0], x[1:] - var ref Ref - switch y := op.Value.(type) { - case Var: - ref = Ref{op} - case Ref: - if _, ok := y[0].Value.(Var); !ok { - p.illegal("rule head ref %v invalid", y) - return nil, false - } - ref = y - } - head = RefHead(ref) - head.Args = append([]*Term{}, args...) - - default: - return nil, false - } - - name := head.Ref().String() - - switch p.s.tok { - case tokens.Contains: // NOTE: no Value for `contains` heads, we return here - // Catch error case of using 'contains' with a function definition rule head. - if head.Args != nil { - p.illegal("the contains keyword can only be used with multi-value rule definitions (e.g., %s contains { ... 
})", name) - } - p.scan() - head.Key = p.parseTermInfixCall() - if head.Key == nil { - p.illegal("expected rule key term (e.g., %s contains { ... })", name) - } - return head, true - - case tokens.Unify: - p.scan() - head.Value = p.parseTermInfixCall() - if head.Value == nil { - // FIX HEAD.String() - p.illegal("expected rule value term (e.g., %s[%s] = { ... })", name, head.Key) - } - case tokens.Assign: - p.scan() - head.Assign = true - head.Value = p.parseTermInfixCall() - if head.Value == nil { - switch { - case len(head.Args) > 0: - p.illegal("expected function value term (e.g., %s(...) := { ... })", name) - case head.Key != nil: - p.illegal("expected partial rule value term (e.g., %s[...] := { ... })", name) - case defaultRule: - p.illegal("expected default rule value term (e.g., default %s := )", name) - default: - p.illegal("expected rule value term (e.g., %s := { ... })", name) - } - } - } - - if head.Value == nil && head.Key == nil { - if len(head.Ref()) != 2 || len(head.Args) > 0 { - head.generatedValue = true - head.Value = BooleanTerm(true).SetLocation(head.Location) - } - } - return head, false -} - -func (p *Parser) parseBody(end tokens.Token) Body { - return p.parseQuery(false, end) -} - -func (p *Parser) parseQuery(requireSemi bool, end tokens.Token) Body { - body := Body{} - - if p.s.tok == end { - p.error(p.s.Loc(), "found empty body") - return nil - } - - for { - expr := p.parseLiteral() - if expr == nil { - return nil - } - - body.Append(expr) - - if p.s.tok == tokens.Semicolon { - p.scan() - continue - } - - if p.s.tok == end || requireSemi { - return body - } - - if !p.s.skippedNL { - // If there was already an error then don't pile this one on - if len(p.s.errors) == 0 { - p.illegal(`expected \n or %s or %s`, tokens.Semicolon, end) - } - return nil - } - } -} - -func (p *Parser) parseLiteral() (expr *Expr) { - - offset := p.s.loc.Offset - loc := p.s.Loc() - - defer func() { - if expr != nil { - loc.Text = p.s.Text(offset, p.s.lastEnd) - expr.SetLoc(loc) - } - }() - - var negated bool - if p.s.tok == tokens.Not { - p.scan() - negated = true - } - - switch p.s.tok { - case tokens.Some: - if negated { - p.illegal("illegal negation of 'some'") - return nil - } - return p.parseSome() - case tokens.Every: - if negated { - p.illegal("illegal negation of 'every'") - return nil - } - return p.parseEvery() - default: - s := p.save() - expr := p.parseExpr() - if expr != nil { - expr.Negated = negated - if p.s.tok == tokens.With { - if expr.With = p.parseWith(); expr.With == nil { - return nil - } - } - // If we find a plain `every` identifier, attempt to parse an every expression, - // add hint if it succeeds. - if term, ok := expr.Terms.(*Term); ok && Var("every").Equal(term.Value) { - var hint bool - t := p.save() - p.restore(s) - if expr := p.futureParser().parseEvery(); expr != nil { - _, hint = expr.Terms.(*Every) - } - p.restore(t) - if hint { - p.hint("`import future.keywords.every` for `every x in xs { ... 
}` expressions") - } - } - return expr - } - return nil - } -} - -func (p *Parser) parseWith() []*With { - - withs := []*With{} - - for { - - with := With{ - Location: p.s.Loc(), - } - p.scan() - - if p.s.tok != tokens.Ident { - p.illegal("expected ident") - return nil - } - - with.Target = p.parseTerm() - if with.Target == nil { - return nil - } - - switch with.Target.Value.(type) { - case Ref, Var: - break - default: - p.illegal("expected with target path") - } - - if p.s.tok != tokens.As { - p.illegal("expected as keyword") - return nil - } - - p.scan() - - if with.Value = p.parseTermInfixCall(); with.Value == nil { - return nil - } - - with.Location.Text = p.s.Text(with.Location.Offset, p.s.lastEnd) - - withs = append(withs, &with) - - if p.s.tok != tokens.With { - break - } - } - - return withs -} - -func (p *Parser) parseSome() *Expr { - - decl := &SomeDecl{} - decl.SetLoc(p.s.Loc()) - - // Attempt to parse "some x in xs", which will end up in - // SomeDecl{Symbols: ["member(x, xs)"]} - s := p.save() - p.scan() - if term := p.parseTermInfixCall(); term != nil { - if call, ok := term.Value.(Call); ok { - switch call[0].String() { - case Member.Name: - if len(call) != 3 { - p.illegal("illegal domain") - return nil - } - case MemberWithKey.Name: - if len(call) != 4 { - p.illegal("illegal domain") - return nil - } - default: - p.illegal("expected `x in xs` or `x, y in xs` expression") - return nil - } - - decl.Symbols = []*Term{term} - expr := NewExpr(decl).SetLocation(decl.Location) - if p.s.tok == tokens.With { - if expr.With = p.parseWith(); expr.With == nil { - return nil - } - } - return expr - } - } - - p.restore(s) - s = p.save() // new copy for later - var hint bool - p.scan() - if term := p.futureParser().parseTermInfixCall(); term != nil { - if call, ok := term.Value.(Call); ok { - switch call[0].String() { - case Member.Name, MemberWithKey.Name: - hint = true - } - } - } - - // go on as before, it's `some x[...]` or illegal - p.restore(s) - if hint { - p.hint("`import future.keywords.in` for `some x in xs` expressions") - } - - for { // collecting var args - - p.scan() - - if p.s.tok != tokens.Ident { - p.illegal("expected var") - return nil - } - - decl.Symbols = append(decl.Symbols, p.parseVar()) - - p.scan() - - if p.s.tok != tokens.Comma { - break - } - } - - return NewExpr(decl).SetLocation(decl.Location) -} - -func (p *Parser) parseEvery() *Expr { - qb := &Every{} - qb.SetLoc(p.s.Loc()) - - // TODO(sr): We'd get more accurate error messages if we didn't rely on - // parseTermInfixCall here, but parsed "var [, var] in term" manually. - p.scan() - term := p.parseTermInfixCall() - if term == nil { - return nil - } - call, ok := term.Value.(Call) - if !ok { - p.illegal("expected `x[, y] in xs { ... }` expression") - return nil - } - switch call[0].String() { - case Member.Name: // x in xs - if len(call) != 3 { - p.illegal("illegal domain") - return nil - } - qb.Value = call[1] - qb.Domain = call[2] - case MemberWithKey.Name: // k, v in xs - if len(call) != 4 { - p.illegal("illegal domain") - return nil - } - qb.Key = call[1] - qb.Value = call[2] - qb.Domain = call[3] - if _, ok := qb.Key.Value.(Var); !ok { - p.illegal("expected key to be a variable") - return nil - } - default: - p.illegal("expected `x[, y] in xs { ... }` expression") - return nil - } - if _, ok := qb.Value.Value.(Var); !ok { - p.illegal("expected value to be a variable") - return nil - } - if p.s.tok == tokens.LBrace { // every x in xs { ... 
} - p.scan() - body := p.parseBody(tokens.RBrace) - if body == nil { - return nil - } - p.scan() - qb.Body = body - expr := NewExpr(qb).SetLocation(qb.Location) - - if p.s.tok == tokens.With { - if expr.With = p.parseWith(); expr.With == nil { - return nil - } - } - return expr - } - - p.illegal("missing body") - return nil -} - -func (p *Parser) parseExpr() *Expr { - - lhs := p.parseTermInfixCall() - if lhs == nil { - return nil - } - - if op := p.parseTermOp(tokens.Assign, tokens.Unify); op != nil { - if rhs := p.parseTermInfixCall(); rhs != nil { - return NewExpr([]*Term{op, lhs, rhs}) - } - return nil - } - - // NOTE(tsandall): the top-level call term is converted to an expr because - // the evaluator does not support the call term type (nested calls are - // rewritten by the compiler.) - if call, ok := lhs.Value.(Call); ok { - return NewExpr([]*Term(call)) - } - - return NewExpr(lhs) -} - -// parseTermInfixCall consumes the next term from the input and returns it. If a -// term cannot be parsed the return value is nil and error will be recorded. The -// scanner will be advanced to the next token before returning. -// By starting out with infix relations (==, !=, <, etc) and further calling the -// other binary operators (|, &, arithmetics), it constitutes the binding -// precedence. -func (p *Parser) parseTermInfixCall() *Term { - return p.parseTermIn(nil, true, p.s.loc.Offset) -} - -func (p *Parser) parseTermInfixCallInList() *Term { - return p.parseTermIn(nil, false, p.s.loc.Offset) -} - -func (p *Parser) parseTermIn(lhs *Term, keyVal bool, offset int) *Term { - // NOTE(sr): `in` is a bit special: besides `lhs in rhs`, it also - // supports `key, val in rhs`, so it can have an optional second lhs. - // `keyVal` triggers if we attempt to parse a second lhs argument (`mhs`). 
- if lhs == nil { - lhs = p.parseTermRelation(nil, offset) - } - if lhs != nil { - if keyVal && p.s.tok == tokens.Comma { // second "lhs", or "middle hand side" - s := p.save() - p.scan() - if mhs := p.parseTermRelation(nil, offset); mhs != nil { - if op := p.parseTermOpName(MemberWithKey.Ref(), tokens.In); op != nil { - if rhs := p.parseTermRelation(nil, p.s.loc.Offset); rhs != nil { - call := p.setLoc(CallTerm(op, lhs, mhs, rhs), lhs.Location, offset, p.s.lastEnd) - switch p.s.tok { - case tokens.In: - return p.parseTermIn(call, keyVal, offset) - default: - return call - } - } - } - } - p.restore(s) - } - if op := p.parseTermOpName(Member.Ref(), tokens.In); op != nil { - if rhs := p.parseTermRelation(nil, p.s.loc.Offset); rhs != nil { - call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) - switch p.s.tok { - case tokens.In: - return p.parseTermIn(call, keyVal, offset) - default: - return call - } - } - } - } - return lhs -} - -func (p *Parser) parseTermRelation(lhs *Term, offset int) *Term { - if lhs == nil { - lhs = p.parseTermOr(nil, offset) - } - if lhs != nil { - if op := p.parseTermOp(tokens.Equal, tokens.Neq, tokens.Lt, tokens.Gt, tokens.Lte, tokens.Gte); op != nil { - if rhs := p.parseTermOr(nil, p.s.loc.Offset); rhs != nil { - call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) - switch p.s.tok { - case tokens.Equal, tokens.Neq, tokens.Lt, tokens.Gt, tokens.Lte, tokens.Gte: - return p.parseTermRelation(call, offset) - default: - return call - } - } - } - } - return lhs -} - -func (p *Parser) parseTermOr(lhs *Term, offset int) *Term { - if lhs == nil { - lhs = p.parseTermAnd(nil, offset) - } - if lhs != nil { - if op := p.parseTermOp(tokens.Or); op != nil { - if rhs := p.parseTermAnd(nil, p.s.loc.Offset); rhs != nil { - call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) - switch p.s.tok { - case tokens.Or: - return p.parseTermOr(call, offset) - default: - return call - } - } - } - return lhs - } - return nil -} - -func (p *Parser) parseTermAnd(lhs *Term, offset int) *Term { - if lhs == nil { - lhs = p.parseTermArith(nil, offset) - } - if lhs != nil { - if op := p.parseTermOp(tokens.And); op != nil { - if rhs := p.parseTermArith(nil, p.s.loc.Offset); rhs != nil { - call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) - switch p.s.tok { - case tokens.And: - return p.parseTermAnd(call, offset) - default: - return call - } - } - } - return lhs - } - return nil -} - -func (p *Parser) parseTermArith(lhs *Term, offset int) *Term { - if lhs == nil { - lhs = p.parseTermFactor(nil, offset) - } - if lhs != nil { - if op := p.parseTermOp(tokens.Add, tokens.Sub); op != nil { - if rhs := p.parseTermFactor(nil, p.s.loc.Offset); rhs != nil { - call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) - switch p.s.tok { - case tokens.Add, tokens.Sub: - return p.parseTermArith(call, offset) - default: - return call - } - } - } - } - return lhs -} - -func (p *Parser) parseTermFactor(lhs *Term, offset int) *Term { - if lhs == nil { - lhs = p.parseTerm() - } - if lhs != nil { - if op := p.parseTermOp(tokens.Mul, tokens.Quo, tokens.Rem); op != nil { - if rhs := p.parseTerm(); rhs != nil { - call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) - switch p.s.tok { - case tokens.Mul, tokens.Quo, tokens.Rem: - return p.parseTermFactor(call, offset) - default: - return call - } - } - } - } - return lhs -} - -func (p *Parser) parseTerm() *Term { - if term, s := 
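// The descent order above (in -> relation -> or -> and -> arith -> factor)
// is what fixes operator precedence. A quick check that multiplication binds
// tighter than addition, which binds tighter than `==`:
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	body := ast.MustParseBody("x == 1 + 2 * 3")
	terms := body[0].Terms.([]*ast.Term)
	fmt.Println(terms[0]) // equal: the relation is outermost
	inner := terms[2].Value.(ast.Call)
	fmt.Println(inner[0]) // plus: addition wraps mul(2, 3)
}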
p.parsedTermCacheLookup(); s != nil { - p.restore(s) - return term - } - s0 := p.save() - - var term *Term - switch p.s.tok { - case tokens.Null: - term = NullTerm().SetLocation(p.s.Loc()) - case tokens.True: - term = BooleanTerm(true).SetLocation(p.s.Loc()) - case tokens.False: - term = BooleanTerm(false).SetLocation(p.s.Loc()) - case tokens.Sub, tokens.Dot, tokens.Number: - term = p.parseNumber() - case tokens.String: - term = p.parseString() - case tokens.Ident, tokens.Contains: // NOTE(sr): contains anywhere BUT in rule heads gets no special treatment - term = p.parseVar() - case tokens.LBrack: - term = p.parseArray() - case tokens.LBrace: - term = p.parseSetOrObject() - case tokens.LParen: - offset := p.s.loc.Offset - p.scan() - if r := p.parseTermInfixCall(); r != nil { - if p.s.tok == tokens.RParen { - r.Location.Text = p.s.Text(offset, p.s.tokEnd) - term = r - } else { - p.error(p.s.Loc(), "non-terminated expression") - } - } - default: - p.illegalToken() - } - - term = p.parseTermFinish(term, false) - p.parsedTermCachePush(term, s0) - return term -} - -func (p *Parser) parseTermFinish(head *Term, skipws bool) *Term { - if head == nil { - return nil - } - offset := p.s.loc.Offset - p.doScan(skipws) - - switch p.s.tok { - case tokens.LParen, tokens.Dot, tokens.LBrack: - return p.parseRef(head, offset) - case tokens.Whitespace: - p.scan() - fallthrough - default: - if _, ok := head.Value.(Var); ok && RootDocumentNames.Contains(head) { - return RefTerm(head).SetLocation(head.Location) - } - return head - } -} - -func (p *Parser) parseNumber() *Term { - var prefix string - loc := p.s.Loc() - if p.s.tok == tokens.Sub { - prefix = "-" - p.scan() - switch p.s.tok { - case tokens.Number, tokens.Dot: - break - default: - p.illegal("expected number") - return nil - } - } - if p.s.tok == tokens.Dot { - prefix += "." - p.scan() - if p.s.tok != tokens.Number { - p.illegal("expected number") - return nil - } - } - - // Check for multiple leading 0's, parsed by math/big.Float.Parse as decimal 0: - // https://golang.org/pkg/math/big/#Float.Parse - if ((len(prefix) != 0 && prefix[0] == '-') || len(prefix) == 0) && - len(p.s.lit) > 1 && p.s.lit[0] == '0' && p.s.lit[1] == '0' { - p.illegal("expected number") - return nil - } - - // Ensure that the number is valid - s := prefix + p.s.lit - f, ok := new(big.Float).SetString(s) - if !ok { - p.illegal("invalid float") - return nil - } - - // Put limit on size of exponent to prevent non-linear cost of String() - // function on big.Float from causing denial of service: https://github.com/golang/go/issues/11068 - // - // n == sign * mantissa * 2^exp - // 0.5 <= mantissa < 1.0 - // - // The limit is arbitrary. - exp := f.MantExp(nil) - if exp > 1e5 || exp < -1e5 || f.IsInf() { // +/- inf, exp is 0 - p.error(p.s.Loc(), "number too big") - return nil - } - - // Note: Use the original string, do *not* round trip from - // the big.Float as it can cause precision loss. 
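// Sketch of the exponent guard above: the base-2 exponent is checked before
// String() is ever called on the big.Float, because formatting cost grows
// with the exponent (golang.org/issue/11068).
package main

import (
	"fmt"
	"math/big"
)

func main() {
	f, _ := new(big.Float).SetString("1e100000")
	exp := f.MantExp(nil) // roughly 332193 for 1e100000
	fmt.Println(exp > 1e5 || exp < -1e5 || f.IsInf()) // true -> "number too big"
}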
- r := NumberTerm(json.Number(s)).SetLocation(loc) - return r -} - -func (p *Parser) parseString() *Term { - if p.s.lit[0] == '"' { - var s string - err := json.Unmarshal([]byte(p.s.lit), &s) - if err != nil { - p.errorf(p.s.Loc(), "illegal string literal: %s", p.s.lit) - return nil - } - term := StringTerm(s).SetLocation(p.s.Loc()) - return term - } - return p.parseRawString() -} - -func (p *Parser) parseRawString() *Term { - if len(p.s.lit) < 2 { - return nil - } - term := StringTerm(p.s.lit[1 : len(p.s.lit)-1]).SetLocation(p.s.Loc()) - return term -} - -// this is the name to use for instantiating an empty set, e.g., `set()`. -var setConstructor = RefTerm(VarTerm("set")) - -func (p *Parser) parseCall(operator *Term, offset int) (term *Term) { - - loc := operator.Location - var end int - - defer func() { - p.setLoc(term, loc, offset, end) - }() - - p.scan() // steps over '(' - - if p.s.tok == tokens.RParen { // no args, i.e. set() or any.func() - end = p.s.tokEnd - p.scanWS() - if operator.Equal(setConstructor) { - return SetTerm() - } - return CallTerm(operator) - } - - if r := p.parseTermList(tokens.RParen, []*Term{operator}); r != nil { - end = p.s.tokEnd - p.scanWS() - return CallTerm(r...) - } - - return nil -} - -func (p *Parser) parseRef(head *Term, offset int) (term *Term) { - - loc := head.Location - var end int - - defer func() { - p.setLoc(term, loc, offset, end) - }() - - switch h := head.Value.(type) { - case Var, *Array, Object, Set, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Call: - // ok - default: - p.errorf(loc, "illegal ref (head cannot be %v)", TypeName(h)) - } - - ref := []*Term{head} - - for { - switch p.s.tok { - case tokens.Dot: - p.scanWS() - if p.s.tok != tokens.Ident { - p.illegal("expected %v", tokens.Ident) - return nil - } - ref = append(ref, StringTerm(p.s.lit).SetLocation(p.s.Loc())) - p.scanWS() - case tokens.LParen: - term = p.parseCall(p.setLoc(RefTerm(ref...), loc, offset, p.s.loc.Offset), offset) - if term != nil { - switch p.s.tok { - case tokens.Whitespace: - p.scan() - end = p.s.lastEnd - return term - case tokens.Dot, tokens.LBrack: - term = p.parseRef(term, offset) - } - } - end = p.s.tokEnd - return term - case tokens.LBrack: - p.scan() - if term := p.parseTermInfixCall(); term != nil { - if p.s.tok != tokens.RBrack { - p.illegal("expected %v", tokens.LBrack) - return nil - } - ref = append(ref, term) - p.scanWS() - } else { - return nil - } - case tokens.Whitespace: - end = p.s.lastEnd - p.scan() - return RefTerm(ref...) - default: - end = p.s.lastEnd - return RefTerm(ref...) - } - } -} - -func (p *Parser) parseArray() (term *Term) { - - loc := p.s.Loc() - offset := p.s.loc.Offset - - defer func() { - p.setLoc(term, loc, offset, p.s.tokEnd) - }() - - p.scan() - - if p.s.tok == tokens.RBrack { - return ArrayTerm() - } - - potentialComprehension := true - - // Skip leading commas, eg [, x, y] - // Supported for backwards compatibility. In the future - // we should make this a parse error. - if p.s.tok == tokens.Comma { - potentialComprehension = false - p.scan() - } - - s := p.save() - - // NOTE(tsandall): The parser cannot attempt a relational term here because - // of ambiguity around comprehensions. For example, given: - // - // {1 | 1} - // - // Does this represent a set comprehension or a set containing binary OR - // call? We resolve the ambiguity by prioritizing comprehensions. 
- head := p.parseTerm() - - if head == nil { - return nil - } - - switch p.s.tok { - case tokens.RBrack: - return ArrayTerm(head) - case tokens.Comma: - p.scan() - if terms := p.parseTermList(tokens.RBrack, []*Term{head}); terms != nil { - return NewTerm(NewArray(terms...)) - } - return nil - case tokens.Or: - if potentialComprehension { - // Try to parse as if it is an array comprehension - p.scan() - if body := p.parseBody(tokens.RBrack); body != nil { - return ArrayComprehensionTerm(head, body) - } - if p.s.tok != tokens.Comma { - return nil - } - } - // fall back to parsing as a normal array definition - } - - p.restore(s) - - if terms := p.parseTermList(tokens.RBrack, nil); terms != nil { - return NewTerm(NewArray(terms...)) - } - return nil -} - -func (p *Parser) parseSetOrObject() (term *Term) { - loc := p.s.Loc() - offset := p.s.loc.Offset - - defer func() { - p.setLoc(term, loc, offset, p.s.tokEnd) - }() - - p.scan() - - if p.s.tok == tokens.RBrace { - return ObjectTerm() - } - - potentialComprehension := true - - // Skip leading commas, eg {, x, y} - // Supported for backwards compatibility. In the future - // we should make this a parse error. - if p.s.tok == tokens.Comma { - potentialComprehension = false - p.scan() - } - - s := p.save() - - // Try parsing just a single term first to give comprehensions higher - // priority to "or" calls in ambiguous situations. Eg: { a | b } - // will be a set comprehension. - // - // Note: We don't know yet if it is a set or object being defined. - head := p.parseTerm() - if head == nil { - return nil - } - - switch p.s.tok { - case tokens.Or: - if potentialComprehension { - return p.parseSet(s, head, potentialComprehension) - } - case tokens.RBrace, tokens.Comma: - return p.parseSet(s, head, potentialComprehension) - case tokens.Colon: - return p.parseObject(head, potentialComprehension) - } - - p.restore(s) - - head = p.parseTermInfixCallInList() - if head == nil { - return nil - } - - switch p.s.tok { - case tokens.RBrace, tokens.Comma: - return p.parseSet(s, head, false) - case tokens.Colon: - // It still might be an object comprehension, eg { a+1: b | ... } - return p.parseObject(head, potentialComprehension) - } - - p.illegal("non-terminated set") - return nil -} - -func (p *Parser) parseSet(s *state, head *Term, potentialComprehension bool) *Term { - switch p.s.tok { - case tokens.RBrace: - return SetTerm(head) - case tokens.Comma: - p.scan() - if terms := p.parseTermList(tokens.RBrace, []*Term{head}); terms != nil { - return SetTerm(terms...) - } - case tokens.Or: - if potentialComprehension { - // Try to parse as if it is a set comprehension - p.scan() - if body := p.parseBody(tokens.RBrace); body != nil { - return SetComprehensionTerm(head, body) - } - if p.s.tok != tokens.Comma { - return nil - } - } - // Fall back to parsing as normal set definition - p.restore(s) - if terms := p.parseTermList(tokens.RBrace, nil); terms != nil { - return SetTerm(terms...) - } - } - return nil -} - -func (p *Parser) parseObject(k *Term, potentialComprehension bool) *Term { - // NOTE(tsandall): Assumption: this function is called after parsing the key - // of the head element and then receiving a colon token from the scanner. - // Advance beyond the colon and attempt to parse an object. - if p.s.tok != tokens.Colon { - panic("expected colon") - } - p.scan() - - s := p.save() - - // NOTE(sr): We first try to parse the value as a term (`v`), and see - // if we can parse `{ x: v | ...}` as a comprehension. 
- // However, if we encounter either a Comma or an RBrace, it cannot be - parsed as a comprehension -- so we save double work further down - where `parseObjectFinish(k, v, false)` would only exercise the - same code paths once more. - v := p.parseTerm() - if v == nil { - return nil - } - - potentialRelation := true - if potentialComprehension { - switch p.s.tok { - case tokens.RBrace, tokens.Comma: - potentialRelation = false - fallthrough - case tokens.Or: - if term := p.parseObjectFinish(k, v, true); term != nil { - return term - } - } - } - - p.restore(s) - - if potentialRelation { - v := p.parseTermInfixCallInList() - if v == nil { - return nil - } - - switch p.s.tok { - case tokens.RBrace, tokens.Comma: - return p.parseObjectFinish(k, v, false) - } - } - - p.illegal("non-terminated object") - return nil -} - -func (p *Parser) parseObjectFinish(key, val *Term, potentialComprehension bool) *Term { - switch p.s.tok { - case tokens.RBrace: - return ObjectTerm([2]*Term{key, val}) - case tokens.Or: - if potentialComprehension { - p.scan() - if body := p.parseBody(tokens.RBrace); body != nil { - return ObjectComprehensionTerm(key, val, body) - } - } else { - p.illegal("non-terminated object") - } - case tokens.Comma: - p.scan() - if r := p.parseTermPairList(tokens.RBrace, [][2]*Term{{key, val}}); r != nil { - return ObjectTerm(r...) - } - } - return nil -} - -func (p *Parser) parseTermList(end tokens.Token, r []*Term) []*Term { - if p.s.tok == end { - return r - } - for { - term := p.parseTermInfixCallInList() - if term != nil { - r = append(r, term) - switch p.s.tok { - case end: - return r - case tokens.Comma: - p.scan() - if p.s.tok == end { - return r - } - continue - default: - p.illegal(fmt.Sprintf("expected %q or %q", tokens.Comma, end)) - return nil - } - } - return nil - } -} - -func (p *Parser) parseTermPairList(end tokens.Token, r [][2]*Term) [][2]*Term { - if p.s.tok == end { - return r - } - for { - key := p.parseTermInfixCallInList() - if key != nil { - switch p.s.tok { - case tokens.Colon: - p.scan() - if val := p.parseTermInfixCallInList(); val != nil { - r = append(r, [2]*Term{key, val}) - switch p.s.tok { - case end: - return r - case tokens.Comma: - p.scan() - if p.s.tok == end { - return r - } - continue - default: - p.illegal(fmt.Sprintf("expected %q or %q", tokens.Comma, end)) - return nil - } - } - default: - p.illegal(fmt.Sprintf("expected %q", tokens.Colon)) - return nil - } - } - return nil - } -} - -func (p *Parser) parseTermOp(values ...tokens.Token) *Term { - for i := range values { - if p.s.tok == values[i] { - r := RefTerm(VarTerm(fmt.Sprint(p.s.tok)).SetLocation(p.s.Loc())).SetLocation(p.s.Loc()) - p.scan() - return r - } - } - return nil -} - -func (p *Parser) parseTermOpName(ref Ref, values ...tokens.Token) *Term { - for i := range values { - if p.s.tok == values[i] { - for _, r := range ref { - r.SetLocation(p.s.Loc()) - } - t := RefTerm(ref...)
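The same priority applies to objects: `{k: v | ...}` parses as an object comprehension, and only a comma or closing brace forces the plain-object path handled by `parseObjectFinish` above. A minimal sketch, assuming the pre-1.x import path; the remainder of `parseTermOpName` follows below.

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	// Plain object: the comma/brace path.
	if _, ok := ast.MustParseTerm(`{"a": 1}`).Value.(ast.Object); ok {
		fmt.Println("plain object")
	}

	// Pipe after the first key/value pair: the comprehension path.
	if _, ok := ast.MustParseTerm(`{x: 1 | x := "a"}`).Value.(*ast.ObjectComprehension); ok {
		fmt.Println("object comprehension")
	}
}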
- t.SetLocation(p.s.Loc()) - p.scan() - return t - } - } - return nil -} - -func (p *Parser) parseVar() *Term { - - s := p.s.lit - - term := VarTerm(s).SetLocation(p.s.Loc()) - - // Update wildcard values with unique identifiers - if term.Equal(Wildcard) { - term.Value = Var(p.genwildcard()) - } - - return term -} - -func (p *Parser) genwildcard() string { - c := p.s.wildcard - p.s.wildcard++ - return fmt.Sprintf("%v%d", WildcardPrefix, c) -} - -func (p *Parser) error(loc *location.Location, reason string) { - p.errorf(loc, reason) -} - -func (p *Parser) errorf(loc *location.Location, f string, a ...interface{}) { - msg := strings.Builder{} - msg.WriteString(fmt.Sprintf(f, a...)) - - switch len(p.s.hints) { - case 0: // nothing to do - case 1: - msg.WriteString(" (hint: ") - msg.WriteString(p.s.hints[0]) - msg.WriteRune(')') - default: - msg.WriteString(" (hints: ") - for i, h := range p.s.hints { - if i > 0 { - msg.WriteString(", ") - } - msg.WriteString(h) - } - msg.WriteRune(')') - } - - p.s.errors = append(p.s.errors, &Error{ - Code: ParseErr, - Message: msg.String(), - Location: loc, - Details: newParserErrorDetail(p.s.s.Bytes(), loc.Offset), - }) - p.s.hints = nil -} - -func (p *Parser) hint(f string, a ...interface{}) { - p.s.hints = append(p.s.hints, fmt.Sprintf(f, a...)) -} - -func (p *Parser) illegal(note string, a ...interface{}) { - tok := p.s.tok.String() - - if p.s.tok == tokens.Illegal { - p.errorf(p.s.Loc(), "illegal token") - return - } - - tokType := "token" - if tokens.IsKeyword(p.s.tok) { - tokType = "keyword" - } - if _, ok := futureKeywords[p.s.tok.String()]; ok { - tokType = "keyword" - } - - note = fmt.Sprintf(note, a...) - if len(note) > 0 { - p.errorf(p.s.Loc(), "unexpected %s %s: %s", tok, tokType, note) - } else { - p.errorf(p.s.Loc(), "unexpected %s %s", tok, tokType) - } -} - -func (p *Parser) illegalToken() { - p.illegal("") -} - -func (p *Parser) scan() { - p.doScan(true) -} - -func (p *Parser) scanWS() { - p.doScan(false) -} - -func (p *Parser) doScan(skipws bool) { - - // NOTE(tsandall): the last position is used to compute the "text" field for - // complex AST nodes. Whitespace never affects the last position of an AST - // node so do not update it when scanning. - if p.s.tok != tokens.Whitespace { - p.s.lastEnd = p.s.tokEnd - p.s.skippedNL = false - } - - var errs []scanner.Error - for { - var pos scanner.Position - p.s.tok, pos, p.s.lit, errs = p.s.s.Scan() - - p.s.tokEnd = pos.End - p.s.loc.Row = pos.Row - p.s.loc.Col = pos.Col - p.s.loc.Offset = pos.Offset - p.s.loc.Text = p.s.Text(pos.Offset, pos.End) - p.s.loc.Tabs = pos.Tabs - - for _, err := range errs { - p.error(p.s.Loc(), err.Message) - } - - if len(errs) > 0 { - p.s.tok = tokens.Illegal - } - - if p.s.tok == tokens.Whitespace { - if p.s.lit == "\n" { - p.s.skippedNL = true - } - if skipws { - continue - } - } - - if p.s.tok != tokens.Comment { - break - } - - // For backwards compatibility leave a nil - // Text value if there is no text rather than - // an empty string. 
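The wildcard mangling in `parseVar` above shows up directly in parsed output: every `_` becomes a fresh `$n` variable. A minimal sketch, assuming the pre-1.x import path; the comment-collection branch of `doScan` continues below.

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	body := ast.MustParseBody(`[_, _] = [1, 2]`)

	// Each wildcard was rewritten to a unique generated variable ($0, $1, ...).
	ast.WalkVars(body, func(v ast.Var) bool {
		if v.IsWildcard() {
			fmt.Println(v)
		}
		return false
	})
}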
- var commentText []byte - if len(p.s.lit) > 1 { - commentText = []byte(p.s.lit[1:]) - } - comment := NewComment(commentText) - comment.SetLoc(p.s.Loc()) - p.s.comments = append(p.s.comments, comment) - } -} - -func (p *Parser) save() *state { - cpy := *p.s - s := *cpy.s - cpy.s = &s - return &cpy -} - -func (p *Parser) restore(s *state) { - p.s = s -} - -func setLocRecursive(x interface{}, loc *location.Location) { - NewGenericVisitor(func(x interface{}) bool { - if node, ok := x.(Node); ok { - node.SetLoc(loc) - } - return false - }).Walk(x) -} - -func (p *Parser) setLoc(term *Term, loc *location.Location, offset, end int) *Term { - if term != nil { - cpy := *loc - term.Location = &cpy - term.Location.Text = p.s.Text(offset, end) - } - return term -} - -func (p *Parser) validateDefaultRuleValue(rule *Rule) bool { - if rule.Head.Value == nil { - p.error(rule.Loc(), "illegal default rule (must have a value)") - return false - } - - valid := true - vis := NewGenericVisitor(func(x interface{}) bool { - switch x.(type) { - case *ArrayComprehension, *ObjectComprehension, *SetComprehension: // skip closures - return true - case Ref, Var, Call: - p.error(rule.Loc(), fmt.Sprintf("illegal default rule (value cannot contain %v)", TypeName(x))) - valid = false - return true - } - return false - }) - - vis.Walk(rule.Head.Value.Value) - return valid -} - -func (p *Parser) validateDefaultRuleArgs(rule *Rule) bool { - - valid := true - vars := NewVarSet() - - vis := NewGenericVisitor(func(x interface{}) bool { - switch x := x.(type) { - case Var: - if vars.Contains(x) { - p.error(rule.Loc(), fmt.Sprintf("illegal default rule (arguments cannot be repeated %v)", x)) - valid = false - return true - } - vars.Add(x) - - case *Term: - switch v := x.Value.(type) { - case Var: // do nothing - default: - p.error(rule.Loc(), fmt.Sprintf("illegal default rule (arguments cannot contain %v)", TypeName(v))) - valid = false - return true - } - } - - return false - }) - - vis.Walk(rule.Head.Args) - return valid -} - -// We explicitly use yaml unmarshalling, to accommodate for the '_' in 'related_resources', -// which isn't handled properly by json for some reason. -type rawAnnotation struct { - Scope string `yaml:"scope"` - Title string `yaml:"title"` - Entrypoint bool `yaml:"entrypoint"` - Description string `yaml:"description"` - Organizations []string `yaml:"organizations"` - RelatedResources []interface{} `yaml:"related_resources"` - Authors []interface{} `yaml:"authors"` - Schemas []map[string]any `yaml:"schemas"` - Custom map[string]interface{} `yaml:"custom"` -} - -type metadataParser struct { - buf *bytes.Buffer - comments []*Comment - loc *location.Location -} - -func newMetadataParser(loc *Location) *metadataParser { - return &metadataParser{loc: loc, buf: bytes.NewBuffer(nil)} -} - -func (b *metadataParser) Append(c *Comment) { - b.buf.Write(bytes.TrimPrefix(c.Text, []byte(" "))) - b.buf.WriteByte('\n') - b.comments = append(b.comments, c) -} - -var yamlLineErrRegex = regexp.MustCompile(`^yaml:(?: unmarshal errors:[\n\s]*)? 
line ([[:digit:]]+):`) - -func (b *metadataParser) Parse() (*Annotations, error) { - - var raw rawAnnotation - - if len(bytes.TrimSpace(b.buf.Bytes())) == 0 { - return nil, fmt.Errorf("expected METADATA block, found whitespace") - } - - if err := yaml.Unmarshal(b.buf.Bytes(), &raw); err != nil { - var comment *Comment - match := yamlLineErrRegex.FindStringSubmatch(err.Error()) - if len(match) == 2 { - index, err2 := strconv.Atoi(match[1]) - if err2 == nil { - if index >= len(b.comments) { - comment = b.comments[len(b.comments)-1] - } else { - comment = b.comments[index] - } - b.loc = comment.Location - } - } - - if match == nil && len(b.comments) > 0 { - b.loc = b.comments[0].Location - } - - return nil, augmentYamlError(err, b.comments) - } - - var result Annotations - result.comments = b.comments - result.Scope = raw.Scope - result.Entrypoint = raw.Entrypoint - result.Title = raw.Title - result.Description = raw.Description - result.Organizations = raw.Organizations - - for _, v := range raw.RelatedResources { - rr, err := parseRelatedResource(v) - if err != nil { - return nil, fmt.Errorf("invalid related-resource definition %s: %w", v, err) - } - result.RelatedResources = append(result.RelatedResources, rr) - } - - for _, pair := range raw.Schemas { - k, v := unwrapPair(pair) - - var a SchemaAnnotation - var err error - - a.Path, err = ParseRef(k) - if err != nil { - return nil, fmt.Errorf("invalid document reference") - } - - switch v := v.(type) { - case string: - a.Schema, err = parseSchemaRef(v) - if err != nil { - return nil, err - } - case map[string]any: - w, err := convertYAMLMapKeyTypes(v, nil) - if err != nil { - return nil, fmt.Errorf("invalid schema definition: %w", err) - } - a.Definition = &w - default: - return nil, fmt.Errorf("invalid schema declaration for path %q", k) - } - - result.Schemas = append(result.Schemas, &a) - } - - for _, v := range raw.Authors { - author, err := parseAuthor(v) - if err != nil { - return nil, fmt.Errorf("invalid author definition %s: %w", v, err) - } - result.Authors = append(result.Authors, author) - } - - result.Custom = make(map[string]interface{}) - for k, v := range raw.Custom { - val, err := convertYAMLMapKeyTypes(v, nil) - if err != nil { - return nil, err - } - result.Custom[k] = val - } - - result.Location = b.loc - - // recreate original text of entire metadata block for location text attribute - sb := strings.Builder{} - sb.WriteString("# METADATA\n") - - lines := bytes.Split(b.buf.Bytes(), []byte{'\n'}) - - for _, line := range lines[:len(lines)-1] { - sb.WriteString("# ") - sb.Write(line) - sb.WriteByte('\n') - } - - result.Location.Text = []byte(strings.TrimSuffix(sb.String(), "\n")) - - return &result, nil -} - -// augmentYamlError augments a YAML error with hints intended to help the user figure out the cause of an otherwise -// cryptic error. These are hints, instead of proper errors, because they are educated guesses, and aren't guaranteed -// to be correct. 
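For reviewers unfamiliar with the metadata machinery being replaced in this hunk: METADATA comment blocks only become Annotations when ProcessAnnotation is enabled. A minimal sketch with a hypothetical policy, assuming the pre-1.x import path; the `augmentYamlError` helper documented just above follows after this aside.

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

// Hypothetical policy used only for illustration.
const policy = `package example

# METADATA
# title: Example rule
# description: Demonstrates METADATA parsing.
allow := true
`

func main() {
	mod, err := ast.ParseModuleWithOpts("example.rego", policy, ast.ParserOptions{
		ProcessAnnotation: true, // without this, the block stays an ordinary comment
	})
	if err != nil {
		panic(err)
	}
	for _, a := range mod.Annotations {
		fmt.Println(a.Title, "-", a.Description)
	}
}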
-func augmentYamlError(err error, comments []*Comment) error { - // Adding hints for when key/value ':' separator isn't suffixed with a legal YAML space symbol - for _, comment := range comments { - txt := string(comment.Text) - parts := strings.Split(txt, ":") - if len(parts) > 1 { - parts = parts[1:] - var invalidSpaces []string - for partIndex, part := range parts { - if len(part) == 0 && partIndex == len(parts)-1 { - invalidSpaces = []string{} - break - } - - r, _ := utf8.DecodeRuneInString(part) - if r == ' ' || r == '\t' { - invalidSpaces = []string{} - break - } - - invalidSpaces = append(invalidSpaces, fmt.Sprintf("%+q", r)) - } - if len(invalidSpaces) > 0 { - err = fmt.Errorf( - "%s\n Hint: on line %d, symbol(s) %v immediately following a key/value separator ':' is not a legal yaml space character", - err.Error(), comment.Location.Row, invalidSpaces) - } - } - } - return err -} - -func unwrapPair(pair map[string]interface{}) (string, interface{}) { - for k, v := range pair { - return k, v - } - return "", nil -} - -var errInvalidSchemaRef = fmt.Errorf("invalid schema reference") - -// NOTE(tsandall): 'schema' is not registered as a root because it's not -supported by the compiler or evaluator today. Once we fix that, we can remove -this function. -func parseSchemaRef(s string) (Ref, error) { - - term, err := ParseTerm(s) - if err == nil { - switch v := term.Value.(type) { - case Var: - if term.Equal(SchemaRootDocument) { - return SchemaRootRef.Copy(), nil - } - case Ref: - if v.HasPrefix(SchemaRootRef) { - return v, nil - } - } - } - - return nil, errInvalidSchemaRef -} - -func parseRelatedResource(rr interface{}) (*RelatedResourceAnnotation, error) { - rr, err := convertYAMLMapKeyTypes(rr, nil) - if err != nil { - return nil, err - } - - switch rr := rr.(type) { - case string: - if len(rr) > 0 { - u, err := url.Parse(rr) - if err != nil { - return nil, err - } - return &RelatedResourceAnnotation{Ref: *u}, nil - } - return nil, fmt.Errorf("ref URL may not be empty string") - case map[string]interface{}: - description := strings.TrimSpace(getSafeString(rr, "description")) - ref := strings.TrimSpace(getSafeString(rr, "ref")) - if len(ref) > 0 { - u, err := url.Parse(ref) - if err != nil { - return nil, err - } - return &RelatedResourceAnnotation{Description: description, Ref: *u}, nil - } - return nil, fmt.Errorf("'ref' value required in object") - } - - return nil, fmt.Errorf("invalid value type, must be string or map") -} - -func parseAuthor(a interface{}) (*AuthorAnnotation, error) { - a, err := convertYAMLMapKeyTypes(a, nil) - if err != nil { - return nil, err - } - - switch a := a.(type) { - case string: - return parseAuthorString(a) - case map[string]interface{}: - name := strings.TrimSpace(getSafeString(a, "name")) - email := strings.TrimSpace(getSafeString(a, "email")) - if len(name) > 0 || len(email) > 0 { - return &AuthorAnnotation{name, email}, nil - } - return nil, fmt.Errorf("'name' and/or 'email' values required in object") - } - - return nil, fmt.Errorf("invalid value type, must be string or map") -} - -func getSafeString(m map[string]interface{}, k string) string { - if v, found := m[k]; found { - if s, ok := v.(string); ok { - return s - } - } - return "" -} - -const emailPrefix = "<" -const emailSuffix = ">" - -// parseAuthorString parses a string into an AuthorAnnotation. If the last word of the input string is enclosed within <>, - it is extracted as the author's email. The email may not contain whitespace, as it would then be interpreted as - multiple words.
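The `<...>` email convention implemented by the constants above is most easily exercised through an authors metadata entry. A minimal sketch with a hypothetical policy, assuming the pre-1.x import path; `parseAuthorString` itself, whose doc comment closes this hunk, follows below.

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

// Hypothetical policy used only for illustration.
const policy = `package example

# METADATA
# authors:
# - Jane Doe <jane@example.com>
allow := true
`

func main() {
	mod, err := ast.ParseModuleWithOpts("example.rego", policy, ast.ParserOptions{ProcessAnnotation: true})
	if err != nil {
		panic(err)
	}
	author := mod.Annotations[0].Authors[0]
	fmt.Println(author.Name)  // Jane Doe
	fmt.Println(author.Email) // jane@example.com
}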
-func parseAuthorString(s string) (*AuthorAnnotation, error) { - parts := strings.Fields(s) - - if len(parts) == 0 { - return nil, fmt.Errorf("author is an empty string") - } - - namePartCount := len(parts) - trailing := parts[namePartCount-1] - var email string - if len(trailing) >= len(emailPrefix)+len(emailSuffix) && strings.HasPrefix(trailing, emailPrefix) && - strings.HasSuffix(trailing, emailSuffix) { - email = trailing[len(emailPrefix):] - email = email[0 : len(email)-len(emailSuffix)] - namePartCount = namePartCount - 1 - } - - name := strings.Join(parts[0:namePartCount], " ") - - return &AuthorAnnotation{Name: name, Email: email}, nil -} - -func convertYAMLMapKeyTypes(x any, path []string) (any, error) { - var err error - switch x := x.(type) { - case map[any]any: - result := make(map[string]any, len(x)) - for k, v := range x { - str, ok := k.(string) - if !ok { - return nil, fmt.Errorf("invalid map key type(s): %v", strings.Join(path, "/")) - } - result[str], err = convertYAMLMapKeyTypes(v, append(path, str)) - if err != nil { - return nil, err - } - } - return result, nil - case []any: - for i := range x { - x[i], err = convertYAMLMapKeyTypes(x[i], append(path, fmt.Sprintf("%d", i))) - if err != nil { - return nil, err - } - } - return x, nil - default: - return x, nil - } -} - -// futureKeywords is the source of truth for future keywords that will -// eventually become standard keywords inside of Rego. -var futureKeywords = map[string]tokens.Token{ - "in": tokens.In, - "every": tokens.Every, - "contains": tokens.Contains, - "if": tokens.If, + return v1.NewParser().WithRegoVersion(DefaultRegoVersion) } func IsFutureKeyword(s string) bool { - _, ok := futureKeywords[s] - return ok -} - -func (p *Parser) futureImport(imp *Import, allowedFutureKeywords map[string]tokens.Token) { - path := imp.Path.Value.(Ref) - - if len(path) == 1 || !path[1].Equal(StringTerm("keywords")) { - p.errorf(imp.Path.Location, "invalid import, must be `future.keywords`") - return - } - - if imp.Alias != "" { - p.errorf(imp.Path.Location, "`future` imports cannot be aliased") - return - } - - if p.s.s.RegoV1Compatible() { - p.errorf(imp.Path.Location, "the `%s` import implies `future.keywords`, these are therefore mutually exclusive", RegoV1CompatibleRef) - return - } - - kwds := make([]string, 0, len(allowedFutureKeywords)) - for k := range allowedFutureKeywords { - kwds = append(kwds, k) - } - - switch len(path) { - case 2: // all keywords imported, nothing to do - case 3: // one keyword imported - kw, ok := path[2].Value.(String) - if !ok { - p.errorf(imp.Path.Location, "invalid import, must be `future.keywords.x`, e.g. 
`import future.keywords.in`") - return - } - keyword := string(kw) - _, ok = allowedFutureKeywords[keyword] - if !ok { - sort.Strings(kwds) // so the error message is stable - p.errorf(imp.Path.Location, "unexpected keyword, must be one of %v", kwds) - return - } - - kwds = []string{keyword} // overwrite - } - for _, kw := range kwds { - p.s.s.AddKeyword(kw, allowedFutureKeywords[kw]) - } -} - -func (p *Parser) regoV1Import(imp *Import) { - if !p.po.Capabilities.ContainsFeature(FeatureRegoV1Import) { - p.errorf(imp.Path.Location, "invalid import, `%s` is not supported by current capabilities", RegoV1CompatibleRef) - return - } - - path := imp.Path.Value.(Ref) - - // v1 is only valid option - if len(path) == 1 || !path[1].Equal(RegoV1CompatibleRef[1]) || len(path) > 2 { - p.errorf(imp.Path.Location, "invalid import `%s`, must be `%s`", path, RegoV1CompatibleRef) - return - } - - if p.po.RegoVersion == RegoV1 { - // We're parsing for Rego v1, where the 'rego.v1' import is a no-op. - return - } - - if imp.Alias != "" { - p.errorf(imp.Path.Location, "`rego` imports cannot be aliased") - return - } - - // import all future keywords with the rego.v1 import - kwds := make([]string, 0, len(futureKeywords)) - for k := range futureKeywords { - kwds = append(kwds, k) - } - - if p.s.s.HasKeyword(futureKeywords) && !p.s.s.RegoV1Compatible() { - // We have imported future keywords, but they didn't come from another `rego.v1` import. - p.errorf(imp.Path.Location, "the `%s` import implies `future.keywords`, these are therefore mutually exclusive", RegoV1CompatibleRef) - return - } - - p.s.s.SetRegoV1Compatible() - for _, kw := range kwds { - p.s.s.AddKeyword(kw, futureKeywords[kw]) - } + return v1.IsFutureKeywordForRegoVersion(s, RegoV0) } diff --git a/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go b/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go index 83c87e47b18..2d596169321 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go +++ b/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go @@ -1,24 +1,14 @@ -// Copyright 2016 The OPA Authors. All rights reserved. +// Copyright 2024 The OPA Authors. All rights reserved. // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. -// This file contains extra functions for parsing Rego. -// Most of the parsing is handled by the code in parser.go, -// however, there are additional utilities that are -// helpful for dealing with Rego source inputs (e.g., REPL -// statements, source files, etc.) - package ast import ( - "bytes" "errors" "fmt" - "strings" - "unicode" - "github.com/open-policy-agent/opa/ast/internal/tokens" - astJSON "github.com/open-policy-agent/opa/ast/json" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // MustParseBody returns a parsed body. @@ -30,11 +20,7 @@ func MustParseBody(input string) Body { // MustParseBodyWithOpts returns a parsed body. // If an error occurs during parsing, panic. func MustParseBodyWithOpts(input string, opts ParserOptions) Body { - parsed, err := ParseBodyWithOpts(input, opts) - if err != nil { - panic(err) - } - return parsed + return v1.MustParseBodyWithOpts(input, setDefaultRegoVersion(opts)) } // MustParseExpr returns a parsed expression. @@ -66,11 +52,7 @@ func MustParseModule(input string) *Module { // MustParseModuleWithOpts returns a parsed module. // If an error occurs during parsing, panic. 
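The `futureImport` handling deleted above is what makes `import future.keywords.in` activate the `in` keyword under Rego v0. A minimal sketch with a hypothetical policy, assuming the pre-1.x import path (which, after this bump, delegates to v1 with a Rego v0 default); the `MustParseModuleWithOpts` shim follows below.

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

// Hypothetical policy used only for illustration.
const policy = `package example

import future.keywords.in

allowed { "admin" in input.roles }
`

func main() {
	// Without the future import, `in` would be a parse error under Rego v0.
	if _, err := ast.ParseModule("example.rego", policy); err != nil {
		panic(err)
	}
	fmt.Println("parsed with future.keywords.in")
}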
func MustParseModuleWithOpts(input string, opts ParserOptions) *Module { - parsed, err := ParseModuleWithOpts("", input, opts) - if err != nil { - panic(err) - } - return parsed + return v1.MustParseModuleWithOpts(input, setDefaultRegoVersion(opts)) } // MustParsePackage returns a Package. @@ -104,11 +86,7 @@ func MustParseStatement(input string) Statement { } func MustParseStatementWithOpts(input string, popts ParserOptions) Statement { - parsed, err := ParseStatementWithOpts(input, popts) - if err != nil { - panic(err) - } - return parsed + return v1.MustParseStatementWithOpts(input, setDefaultRegoVersion(popts)) } // MustParseRef returns a parsed reference. @@ -134,11 +112,7 @@ func MustParseRule(input string) *Rule { // MustParseRuleWithOpts returns a parsed rule. // If an error occurs during parsing, panic. func MustParseRuleWithOpts(input string, opts ParserOptions) *Rule { - parsed, err := ParseRuleWithOpts(input, opts) - if err != nil { - panic(err) - } - return parsed + return v1.MustParseRuleWithOpts(input, setDefaultRegoVersion(opts)) } // MustParseTerm returns a parsed term. @@ -154,331 +128,59 @@ func MustParseTerm(input string) *Term { // ParseRuleFromBody returns a rule if the body can be interpreted as a rule // definition. Otherwise, an error is returned. func ParseRuleFromBody(module *Module, body Body) (*Rule, error) { - - if len(body) != 1 { - return nil, fmt.Errorf("multiple expressions cannot be used for rule head") - } - - return ParseRuleFromExpr(module, body[0]) + return v1.ParseRuleFromBody(module, body) } // ParseRuleFromExpr returns a rule if the expression can be interpreted as a // rule definition. func ParseRuleFromExpr(module *Module, expr *Expr) (*Rule, error) { - - if len(expr.With) > 0 { - return nil, fmt.Errorf("expressions using with keyword cannot be used for rule head") - } - - if expr.Negated { - return nil, fmt.Errorf("negated expressions cannot be used for rule head") - } - - if _, ok := expr.Terms.(*SomeDecl); ok { - return nil, errors.New("'some' declarations cannot be used for rule head") - } - - if term, ok := expr.Terms.(*Term); ok { - switch v := term.Value.(type) { - case Ref: - if len(v) > 2 { // 2+ dots - return ParseCompleteDocRuleWithDotsFromTerm(module, term) - } - return ParsePartialSetDocRuleFromTerm(module, term) - default: - return nil, fmt.Errorf("%v cannot be used for rule name", TypeName(v)) - } - } - - if _, ok := expr.Terms.([]*Term); !ok { - // This is a defensive check in case other kinds of expression terms are - // introduced in the future. - return nil, errors.New("expression cannot be used for rule head") - } - - if expr.IsEquality() { - return parseCompleteRuleFromEq(module, expr) - } else if expr.IsAssignment() { - rule, err := parseCompleteRuleFromEq(module, expr) - if err != nil { - return nil, err - } - rule.Head.Assign = true - return rule, nil - } - - if _, ok := BuiltinMap[expr.Operator().String()]; ok { - return nil, fmt.Errorf("rule name conflicts with built-in function") - } - - return ParseRuleFromCallExpr(module, expr.Terms.([]*Term)) -} - -func parseCompleteRuleFromEq(module *Module, expr *Expr) (rule *Rule, err error) { - - // ensure the rule location is set to the expr location - // the helper functions called below try to set the location based - // on the terms they've been provided but that is not as accurate. 
- defer func() { - if rule != nil { - rule.Location = expr.Location - rule.Head.Location = expr.Location - } - }() - - lhs, rhs := expr.Operand(0), expr.Operand(1) - if lhs == nil || rhs == nil { - return nil, errors.New("assignment requires two operands") - } - - rule, err = ParseRuleFromCallEqExpr(module, lhs, rhs) - if err == nil { - return rule, nil - } - - rule, err = ParsePartialObjectDocRuleFromEqExpr(module, lhs, rhs) - if err == nil { - return rule, nil - } - - return ParseCompleteDocRuleFromEqExpr(module, lhs, rhs) + return v1.ParseRuleFromExpr(module, expr) } // ParseCompleteDocRuleFromAssignmentExpr returns a rule if the expression can // be interpreted as a complete document definition declared with the assignment // operator. func ParseCompleteDocRuleFromAssignmentExpr(module *Module, lhs, rhs *Term) (*Rule, error) { - - rule, err := ParseCompleteDocRuleFromEqExpr(module, lhs, rhs) - if err != nil { - return nil, err - } - - rule.Head.Assign = true - - return rule, nil + return v1.ParseCompleteDocRuleFromAssignmentExpr(module, lhs, rhs) } // ParseCompleteDocRuleFromEqExpr returns a rule if the expression can be // interpreted as a complete document definition. func ParseCompleteDocRuleFromEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) { - var head *Head - - if v, ok := lhs.Value.(Var); ok { - // Modify the code to add the location to the head ref - // and set the head ref's jsonOptions. - head = VarHead(v, lhs.Location, &lhs.jsonOptions) - } else if r, ok := lhs.Value.(Ref); ok { // groundness ? - if _, ok := r[0].Value.(Var); !ok { - return nil, fmt.Errorf("invalid rule head: %v", r) - } - head = RefHead(r) - if len(r) > 1 && !r[len(r)-1].IsGround() { - return nil, fmt.Errorf("ref not ground") - } - } else { - return nil, fmt.Errorf("%v cannot be used for rule name", TypeName(lhs.Value)) - } - head.Value = rhs - head.Location = lhs.Location - head.setJSONOptions(lhs.jsonOptions) - - body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location)) - setJSONOptions(body, &rhs.jsonOptions) - - return &Rule{ - Location: lhs.Location, - Head: head, - Body: body, - Module: module, - jsonOptions: lhs.jsonOptions, - generatedBody: true, - }, nil + return v1.ParseCompleteDocRuleFromEqExpr(module, lhs, rhs) } func ParseCompleteDocRuleWithDotsFromTerm(module *Module, term *Term) (*Rule, error) { - ref, ok := term.Value.(Ref) - if !ok { - return nil, fmt.Errorf("%v cannot be used for rule name", TypeName(term.Value)) - } - - if _, ok := ref[0].Value.(Var); !ok { - return nil, fmt.Errorf("invalid rule head: %v", ref) - } - head := RefHead(ref, BooleanTerm(true).SetLocation(term.Location)) - head.generatedValue = true - head.Location = term.Location - head.jsonOptions = term.jsonOptions - - body := NewBody(NewExpr(BooleanTerm(true).SetLocation(term.Location)).SetLocation(term.Location)) - setJSONOptions(body, &term.jsonOptions) - - return &Rule{ - Location: term.Location, - Head: head, - Body: body, - Module: module, - - jsonOptions: term.jsonOptions, - }, nil + return v1.ParseCompleteDocRuleWithDotsFromTerm(module, term) } // ParsePartialObjectDocRuleFromEqExpr returns a rule if the expression can be // interpreted as a partial object document definition. 
func ParsePartialObjectDocRuleFromEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) { - ref, ok := lhs.Value.(Ref) - if !ok { - return nil, fmt.Errorf("%v cannot be used as rule name", TypeName(lhs.Value)) - } - - if _, ok := ref[0].Value.(Var); !ok { - return nil, fmt.Errorf("invalid rule head: %v", ref) - } - - head := RefHead(ref, rhs) - if len(ref) == 2 { // backcompat for naked `foo.bar = "baz"` statements - head.Name = ref[0].Value.(Var) - head.Key = ref[1] - } - head.Location = rhs.Location - head.jsonOptions = rhs.jsonOptions - - body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location)) - setJSONOptions(body, &rhs.jsonOptions) - - rule := &Rule{ - Location: rhs.Location, - Head: head, - Body: body, - Module: module, - jsonOptions: rhs.jsonOptions, - } - - return rule, nil + return v1.ParsePartialObjectDocRuleFromEqExpr(module, lhs, rhs) } // ParsePartialSetDocRuleFromTerm returns a rule if the term can be interpreted // as a partial set document definition. func ParsePartialSetDocRuleFromTerm(module *Module, term *Term) (*Rule, error) { - - ref, ok := term.Value.(Ref) - if !ok || len(ref) == 1 { - return nil, fmt.Errorf("%vs cannot be used for rule head", TypeName(term.Value)) - } - if _, ok := ref[0].Value.(Var); !ok { - return nil, fmt.Errorf("invalid rule head: %v", ref) - } - - head := RefHead(ref) - if len(ref) == 2 { - v, ok := ref[0].Value.(Var) - if !ok { - return nil, fmt.Errorf("%vs cannot be used for rule head", TypeName(term.Value)) - } - // Modify the code to add the location to the head ref - // and set the head ref's jsonOptions. - head = VarHead(v, ref[0].Location, &ref[0].jsonOptions) - head.Key = ref[1] - } - head.Location = term.Location - head.jsonOptions = term.jsonOptions - - body := NewBody(NewExpr(BooleanTerm(true).SetLocation(term.Location)).SetLocation(term.Location)) - setJSONOptions(body, &term.jsonOptions) - - rule := &Rule{ - Location: term.Location, - Head: head, - Body: body, - Module: module, - jsonOptions: term.jsonOptions, - } - - return rule, nil + return v1.ParsePartialSetDocRuleFromTerm(module, term) } // ParseRuleFromCallEqExpr returns a rule if the term can be interpreted as a // function definition (e.g., f(x) = y => f(x) = y { true }). func ParseRuleFromCallEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) { - - call, ok := lhs.Value.(Call) - if !ok { - return nil, fmt.Errorf("must be call") - } - - ref, ok := call[0].Value.(Ref) - if !ok { - return nil, fmt.Errorf("%vs cannot be used in function signature", TypeName(call[0].Value)) - } - if _, ok := ref[0].Value.(Var); !ok { - return nil, fmt.Errorf("invalid rule head: %v", ref) - } - - head := RefHead(ref, rhs) - head.Location = lhs.Location - head.Args = Args(call[1:]) - head.jsonOptions = lhs.jsonOptions - - body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location)) - setJSONOptions(body, &rhs.jsonOptions) - - rule := &Rule{ - Location: lhs.Location, - Head: head, - Body: body, - Module: module, - jsonOptions: lhs.jsonOptions, - } - - return rule, nil + return v1.ParseRuleFromCallEqExpr(module, lhs, rhs) } // ParseRuleFromCallExpr returns a rule if the terms can be interpreted as a // function returning true or some value (e.g., f(x) => f(x) = true { true }). 
func ParseRuleFromCallExpr(module *Module, terms []*Term) (*Rule, error) { - - if len(terms) <= 1 { - return nil, fmt.Errorf("rule argument list must take at least one argument") - } - - loc := terms[0].Location - ref := terms[0].Value.(Ref) - if _, ok := ref[0].Value.(Var); !ok { - return nil, fmt.Errorf("invalid rule head: %v", ref) - } - head := RefHead(ref, BooleanTerm(true).SetLocation(loc)) - head.Location = loc - head.Args = terms[1:] - head.jsonOptions = terms[0].jsonOptions - - body := NewBody(NewExpr(BooleanTerm(true).SetLocation(loc)).SetLocation(loc)) - setJSONOptions(body, &terms[0].jsonOptions) - - rule := &Rule{ - Location: loc, - Head: head, - Module: module, - Body: body, - jsonOptions: terms[0].jsonOptions, - } - return rule, nil + return v1.ParseRuleFromCallExpr(module, terms) } // ParseImports returns a slice of Import objects. func ParseImports(input string) ([]*Import, error) { - stmts, _, err := ParseStatements("", input) - if err != nil { - return nil, err - } - result := []*Import{} - for _, stmt := range stmts { - if imp, ok := stmt.(*Import); ok { - result = append(result, imp) - } else { - return nil, fmt.Errorf("expected import but got %T", stmt) - } - } - return result, nil + return v1.ParseImports(input) } // ParseModule returns a parsed Module object. @@ -492,11 +194,7 @@ func ParseModule(filename, input string) (*Module, error) { // For details on Module objects and their fields, see policy.go. // Empty input will return nil, nil. func ParseModuleWithOpts(filename, input string, popts ParserOptions) (*Module, error) { - stmts, comments, err := ParseStatementsWithOpts(filename, input, popts) - if err != nil { - return nil, err - } - return parseModule(filename, stmts, comments, popts.RegoVersion) + return v1.ParseModuleWithOpts(filename, input, setDefaultRegoVersion(popts)) } // ParseBody returns exactly one body. @@ -508,28 +206,7 @@ func ParseBody(input string) (Body, error) { // ParseBodyWithOpts returns exactly one body. It does _not_ set SkipRules: true on its own, // but respects whatever ParserOptions it's been given. func ParseBodyWithOpts(input string, popts ParserOptions) (Body, error) { - - stmts, _, err := ParseStatementsWithOpts("", input, popts) - if err != nil { - return nil, err - } - - result := Body{} - - for _, stmt := range stmts { - switch stmt := stmt.(type) { - case Body: - for i := range stmt { - result.Append(stmt[i]) - } - case *Comment: - // skip - default: - return nil, fmt.Errorf("expected body but got %T", stmt) - } - } - - return result, nil + return v1.ParseBodyWithOpts(input, setDefaultRegoVersion(popts)) } // ParseExpr returns exactly one expression. @@ -548,15 +225,7 @@ func ParseExpr(input string) (*Expr, error) { // ParsePackage returns exactly one Package. // If multiple statements are parsed, an error is returned. func ParsePackage(input string) (*Package, error) { - stmt, err := ParseStatement(input) - if err != nil { - return nil, err - } - pkg, ok := stmt.(*Package) - if !ok { - return nil, fmt.Errorf("expected package but got %T", stmt) - } - return pkg, nil + return v1.ParsePackage(input) } // ParseTerm returns exactly one term. @@ -592,18 +261,7 @@ func ParseRef(input string) (Ref, error) { // ParseRuleWithOpts returns exactly one rule. // If multiple rules are parsed, an error is returned. 
func ParseRuleWithOpts(input string, opts ParserOptions) (*Rule, error) { - stmts, _, err := ParseStatementsWithOpts("", input, opts) - if err != nil { - return nil, err - } - if len(stmts) != 1 { - return nil, fmt.Errorf("expected exactly one statement (rule), got %v = %T, %T", stmts, stmts[0], stmts[1]) - } - rule, ok := stmts[0].(*Rule) - if !ok { - return nil, fmt.Errorf("expected rule but got %T", stmts[0]) - } - return rule, nil + return v1.ParseRuleWithOpts(input, setDefaultRegoVersion(opts)) } // ParseRule returns exactly one rule. @@ -622,20 +280,13 @@ func ParseStatement(input string) (Statement, error) { return nil, err } if len(stmts) != 1 { - return nil, fmt.Errorf("expected exactly one statement") + return nil, errors.New("expected exactly one statement") } return stmts[0], nil } func ParseStatementWithOpts(input string, popts ParserOptions) (Statement, error) { - stmts, _, err := ParseStatementsWithOpts("", input, popts) - if err != nil { - return nil, err - } - if len(stmts) != 1 { - return nil, fmt.Errorf("expected exactly one statement") - } - return stmts[0], nil + return v1.ParseStatementWithOpts(input, setDefaultRegoVersion(popts)) } // ParseStatements is deprecated. Use ParseStatementWithOpts instead. @@ -646,204 +297,15 @@ func ParseStatements(filename, input string) ([]Statement, []*Comment, error) { // ParseStatementsWithOpts returns a slice of parsed statements. This is the // default return value from the parser. func ParseStatementsWithOpts(filename, input string, popts ParserOptions) ([]Statement, []*Comment, error) { - - parser := NewParser(). - WithFilename(filename). - WithReader(bytes.NewBufferString(input)). - WithProcessAnnotation(popts.ProcessAnnotation). - WithFutureKeywords(popts.FutureKeywords...). - WithAllFutureKeywords(popts.AllFutureKeywords). - WithCapabilities(popts.Capabilities). - WithSkipRules(popts.SkipRules). - WithJSONOptions(popts.JSONOptions). - WithRegoVersion(popts.RegoVersion). - withUnreleasedKeywords(popts.unreleasedKeywords) - - stmts, comments, errs := parser.Parse() - - if len(errs) > 0 { - return nil, nil, errs - } - - return stmts, comments, nil -} - -func parseModule(filename string, stmts []Statement, comments []*Comment, regoCompatibilityMode RegoVersion) (*Module, error) { - - if len(stmts) == 0 { - return nil, NewError(ParseErr, &Location{File: filename}, "empty module") - } - - var errs Errors - - pkg, ok := stmts[0].(*Package) - if !ok { - loc := stmts[0].Loc() - errs = append(errs, NewError(ParseErr, loc, "package expected")) - } - - mod := &Module{ - Package: pkg, - stmts: stmts, - } - - // The comments slice only holds comments that were not their own statements. - mod.Comments = append(mod.Comments, comments...) - mod.regoVersion = regoCompatibilityMode - - for i, stmt := range stmts[1:] { - switch stmt := stmt.(type) { - case *Import: - mod.Imports = append(mod.Imports, stmt) - if mod.regoVersion == RegoV0 && Compare(stmt.Path.Value, RegoV1CompatibleRef) == 0 { - mod.regoVersion = RegoV0CompatV1 - } - case *Rule: - setRuleModule(stmt, mod) - mod.Rules = append(mod.Rules, stmt) - case Body: - rule, err := ParseRuleFromBody(mod, stmt) - if err != nil { - errs = append(errs, NewError(ParseErr, stmt[0].Location, err.Error())) - continue - } - rule.generatedBody = true - mod.Rules = append(mod.Rules, rule) - - // NOTE(tsandall): the statement should now be interpreted as a - // rule so update the statement list. This is important for the - // logic below that associates annotations with statements. 
- stmts[i+1] = rule - case *Package: - errs = append(errs, NewError(ParseErr, stmt.Loc(), "unexpected package")) - case *Annotations: - mod.Annotations = append(mod.Annotations, stmt) - case *Comment: - // Ignore comments, they're handled above. - default: - panic("illegal value") // Indicates grammar is out-of-sync with code. - } - } - - if mod.regoVersion == RegoV0CompatV1 || mod.regoVersion == RegoV1 { - for _, rule := range mod.Rules { - for r := rule; r != nil; r = r.Else { - errs = append(errs, CheckRegoV1(r)...) - } - } - } - - if len(errs) > 0 { - return nil, errs - } - - errs = append(errs, attachAnnotationsNodes(mod)...) - - if len(errs) > 0 { - return nil, errs - } - - attachRuleAnnotations(mod) - - return mod, nil -} - -func ruleDeclarationHasKeyword(rule *Rule, keyword tokens.Token) bool { - for _, kw := range rule.Head.keywords { - if kw == keyword { - return true - } - } - return false -} - -func newScopeAttachmentErr(a *Annotations, want string) *Error { - var have string - if a.node != nil { - have = fmt.Sprintf(" (have %v)", TypeName(a.node)) - } - return NewError(ParseErr, a.Loc(), "annotation scope '%v' must be applied to %v%v", a.Scope, want, have) -} - -func setRuleModule(rule *Rule, module *Module) { - rule.Module = module - if rule.Else != nil { - setRuleModule(rule.Else, module) - } -} - -func setJSONOptions(x interface{}, jsonOptions *astJSON.Options) { - vis := NewGenericVisitor(func(x interface{}) bool { - if x, ok := x.(customJSON); ok { - x.setJSONOptions(*jsonOptions) - } - return false - }) - vis.Walk(x) + return v1.ParseStatementsWithOpts(filename, input, setDefaultRegoVersion(popts)) } // ParserErrorDetail holds additional details for parser errors. -type ParserErrorDetail struct { - Line string `json:"line"` - Idx int `json:"idx"` -} - -func newParserErrorDetail(bs []byte, offset int) *ParserErrorDetail { - - // Find first non-space character at or before offset position. - if offset >= len(bs) { - offset = len(bs) - 1 - } else if offset < 0 { - offset = 0 - } - - for offset > 0 && unicode.IsSpace(rune(bs[offset])) { - offset-- - } - - // Find beginning of line containing offset. - begin := offset - - for begin > 0 && !isNewLineChar(bs[begin]) { - begin-- - } +type ParserErrorDetail = v1.ParserErrorDetail - if isNewLineChar(bs[begin]) { - begin++ +func setDefaultRegoVersion(opts ParserOptions) ParserOptions { + if opts.RegoVersion == RegoUndefined { + opts.RegoVersion = DefaultRegoVersion } - - // Find end of line containing offset. - end := offset - - for end < len(bs) && !isNewLineChar(bs[end]) { - end++ - } - - if begin > end { - begin = end - } - - // Extract line and compute index of offset byte in line. - line := bs[begin:end] - index := offset - begin - - return &ParserErrorDetail{ - Line: string(line), - Idx: index, - } -} - -// Lines returns the pretty formatted line output for the error details. 
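`ParserErrorDetail`, re-exported above as an alias of the v1 type, powers the caret-style context OPA prints under failing input; `Lines`, whose doc comment closes this hunk, continues below. A minimal sketch, assuming the pre-1.x import path.

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	_, err := ast.ParseBody(`x := {"a": `) // deliberately broken input
	errs, ok := err.(ast.Errors)
	if !ok {
		return
	}
	for _, e := range errs {
		fmt.Println(e.Message)
		if d, ok := e.Details.(*ast.ParserErrorDetail); ok {
			// Prints the offending line followed by a caret marker.
			for _, line := range d.Lines() {
				fmt.Println(line)
			}
		}
	}
}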
-func (d ParserErrorDetail) Lines() []string { - line := strings.TrimLeft(d.Line, "\t") // remove leading tabs - tabCount := len(d.Line) - len(line) - indent := d.Idx - tabCount - if indent < 0 { - indent = 0 - } - return []string{line, strings.Repeat(" ", indent) + "^"} -} - -func isNewLineChar(b byte) bool { - return b == '\r' || b == '\n' + return opts } diff --git a/vendor/github.com/open-policy-agent/opa/ast/policy.go b/vendor/github.com/open-policy-agent/opa/ast/policy.go index 43e9bba4a32..3da7fdd6369 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/policy.go +++ b/vendor/github.com/open-policy-agent/opa/ast/policy.go @@ -1,196 +1,113 @@ -// Copyright 2016 The OPA Authors. All rights reserved. +// Copyright 2024 The OPA Authors. All rights reserved. // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. package ast import ( - "bytes" - "encoding/json" - "fmt" - "math/rand" - "strings" - "time" - - "github.com/open-policy-agent/opa/ast/internal/tokens" astJSON "github.com/open-policy-agent/opa/ast/json" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/ast" ) -// Initialize seed for term hashing. This is intentionally placed before the -// root document sets are constructed to ensure they use the same hash seed as -// subsequent lookups. If the hash seeds are out of sync, lookups will fail. -var hashSeed = rand.New(rand.NewSource(time.Now().UnixNano())) -var hashSeed0 = (uint64(hashSeed.Uint32()) << 32) | uint64(hashSeed.Uint32()) - // DefaultRootDocument is the default root document. // // All package directives inside source files are implicitly prefixed with the // DefaultRootDocument value. -var DefaultRootDocument = VarTerm("data") +var DefaultRootDocument = v1.DefaultRootDocument // InputRootDocument names the document containing query arguments. -var InputRootDocument = VarTerm("input") +var InputRootDocument = v1.InputRootDocument // SchemaRootDocument names the document containing external data schemas. -var SchemaRootDocument = VarTerm("schema") +var SchemaRootDocument = v1.SchemaRootDocument // FunctionArgRootDocument names the document containing function arguments. // It's only for internal usage, for referencing function arguments between // the index and topdown. -var FunctionArgRootDocument = VarTerm("args") +var FunctionArgRootDocument = v1.FunctionArgRootDocument // FutureRootDocument names the document containing new, to-become-default, // features. -var FutureRootDocument = VarTerm("future") +var FutureRootDocument = v1.FutureRootDocument // RegoRootDocument names the document containing new, to-become-default, // features in a future versioned release. -var RegoRootDocument = VarTerm("rego") +var RegoRootDocument = v1.RegoRootDocument // RootDocumentNames contains the names of top-level documents that can be // referred to in modules and queries. // // Note, the schema document is not currently implemented in the evaluator so it // is not registered as a root document name (yet). -var RootDocumentNames = NewSet( - DefaultRootDocument, - InputRootDocument, -) +var RootDocumentNames = v1.RootDocumentNames // DefaultRootRef is a reference to the root of the default document. // // All refs to data in the policy engine's storage layer are prefixed with this ref. -var DefaultRootRef = Ref{DefaultRootDocument} +var DefaultRootRef = v1.DefaultRootRef // InputRootRef is a reference to the root of the input document. // // All refs to query arguments are prefixed with this ref. 
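Taken together, `setDefaultRegoVersion` above and the v1 delegation mean the legacy import path keeps accepting v0 syntax, while the v1 package rejects it by default. A minimal sketch contrasting the two paths introduced by this bump; the `InputRootRef` declaration continues below.

package main

import (
	"fmt"

	astv0 "github.com/open-policy-agent/opa/ast"
	astv1 "github.com/open-policy-agent/opa/v1/ast"
)

// Hypothetical v0-style policy (no `if` keyword).
const policy = `package example

allow { input.admin }
`

func main() {
	// Legacy path: RegoUndefined is filled in with RegoV0, so this parses.
	if _, err := astv0.ParseModule("example.rego", policy); err != nil {
		panic(err)
	}
	fmt.Println("v0 path: ok")

	// v1 path: defaults to Rego v1, which requires `if` before rule bodies.
	if _, err := astv1.ParseModule("example.rego", policy); err != nil {
		fmt.Println("v1 path:", err)
	}
}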
-var InputRootRef = Ref{InputRootDocument} +var InputRootRef = v1.InputRootRef // SchemaRootRef is a reference to the root of the schema document. // // All refs to schema documents are prefixed with this ref. Note, the schema // document is not currently implemented in the evaluator so it is not // registered as a root document ref (yet). -var SchemaRootRef = Ref{SchemaRootDocument} +var SchemaRootRef = v1.SchemaRootRef // RootDocumentRefs contains the prefixes of top-level documents that all // non-local references start with. -var RootDocumentRefs = NewSet( - NewTerm(DefaultRootRef), - NewTerm(InputRootRef), -) +var RootDocumentRefs = v1.RootDocumentRefs // SystemDocumentKey is the name of the top-level key that identifies the system // document. -var SystemDocumentKey = String("system") +const SystemDocumentKey = v1.SystemDocumentKey // ReservedVars is the set of names that refer to implicitly ground vars. -var ReservedVars = NewVarSet( - DefaultRootDocument.Value.(Var), - InputRootDocument.Value.(Var), -) +var ReservedVars = v1.ReservedVars // Wildcard represents the wildcard variable as defined in the language. -var Wildcard = &Term{Value: Var("_")} +var Wildcard = v1.Wildcard // WildcardPrefix is the special character that all wildcard variables are // prefixed with when the statement they are contained in is parsed. -var WildcardPrefix = "$" +const WildcardPrefix = v1.WildcardPrefix // Keywords contains strings that map to language keywords. -var Keywords = KeywordsForRegoVersion(DefaultRegoVersion) +var Keywords = v1.Keywords -var KeywordsV0 = [...]string{ - "not", - "package", - "import", - "as", - "default", - "else", - "with", - "null", - "true", - "false", - "some", -} +var KeywordsV0 = v1.KeywordsV0 -var KeywordsV1 = [...]string{ - "not", - "package", - "import", - "as", - "default", - "else", - "with", - "null", - "true", - "false", - "some", - "if", - "contains", - "in", - "every", -} +var KeywordsV1 = v1.KeywordsV1 func KeywordsForRegoVersion(v RegoVersion) []string { - switch v { - case RegoV0: - return KeywordsV0[:] - case RegoV1, RegoV0CompatV1: - return KeywordsV1[:] - } - return nil + return v1.KeywordsForRegoVersion(v) } // IsKeyword returns true if s is a language keyword. func IsKeyword(s string) bool { - return IsInKeywords(s, Keywords) + return v1.IsKeyword(s) } func IsInKeywords(s string, keywords []string) bool { - for _, x := range keywords { - if x == s { - return true - } - } - return false + return v1.IsInKeywords(s, keywords) } // IsKeywordInRegoVersion returns true if s is a language keyword. func IsKeywordInRegoVersion(s string, regoVersion RegoVersion) bool { - switch regoVersion { - case RegoV0: - for _, x := range KeywordsV0 { - if x == s { - return true - } - } - case RegoV1, RegoV0CompatV1: - for _, x := range KeywordsV1 { - if x == s { - return true - } - } - } - - return false + return v1.IsKeywordInRegoVersion(s, regoVersion) } type ( // Node represents a node in an AST. Nodes may be statements in a policy module // or elements of an ad-hoc query, expression, etc. - Node interface { - fmt.Stringer - Loc() *Location - SetLoc(*Location) - } + Node = v1.Node // Statement represents a single statement in a policy module. - Statement interface { - Node - } + Statement = v1.Statement ) type ( @@ -198,1894 +115,121 @@ type ( // Module represents a collection of policies (defined by rules) // within a namespace (defined by the package) and optional // dependencies on external documents (defined by imports). 
- Module struct { - Package *Package `json:"package"` - Imports []*Import `json:"imports,omitempty"` - Annotations []*Annotations `json:"annotations,omitempty"` - Rules []*Rule `json:"rules,omitempty"` - Comments []*Comment `json:"comments,omitempty"` - stmts []Statement - regoVersion RegoVersion - } + Module = v1.Module // Comment contains the raw text from the comment in the definition. - Comment struct { - // TODO: these fields have inconsistent JSON keys with other structs in this package. - Text []byte - Location *Location - - jsonOptions astJSON.Options - } + Comment = v1.Comment // Package represents the namespace of the documents produced // by rules inside the module. - Package struct { - Path Ref `json:"path"` - Location *Location `json:"location,omitempty"` - - jsonOptions astJSON.Options - } + Package = v1.Package // Import represents a dependency on a document outside of the policy // namespace. Imports are optional. - Import struct { - Path *Term `json:"path"` - Alias Var `json:"alias,omitempty"` - Location *Location `json:"location,omitempty"` - - jsonOptions astJSON.Options - } + Import = v1.Import // Rule represents a rule as defined in the language. Rules define the // content of documents that represent policy decisions. - Rule struct { - Default bool `json:"default,omitempty"` - Head *Head `json:"head"` - Body Body `json:"body"` - Else *Rule `json:"else,omitempty"` - Location *Location `json:"location,omitempty"` - Annotations []*Annotations `json:"annotations,omitempty"` - - // Module is a pointer to the module containing this rule. If the rule - // was NOT created while parsing/constructing a module, this should be - // left unset. The pointer is not included in any standard operations - // on the rule (e.g., printing, comparison, visiting, etc.) - Module *Module `json:"-"` - - generatedBody bool - jsonOptions astJSON.Options - } + Rule = v1.Rule // Head represents the head of a rule. - Head struct { - Name Var `json:"name,omitempty"` - Reference Ref `json:"ref,omitempty"` - Args Args `json:"args,omitempty"` - Key *Term `json:"key,omitempty"` - Value *Term `json:"value,omitempty"` - Assign bool `json:"assign,omitempty"` - Location *Location `json:"location,omitempty"` - - keywords []tokens.Token - generatedValue bool - jsonOptions astJSON.Options - } + Head = v1.Head // Args represents zero or more arguments to a rule. - Args []*Term + Args = v1.Args // Body represents one or more expressions contained inside a rule or user // function. - Body []*Expr + Body = v1.Body // Expr represents a single expression contained inside the body of a rule. - Expr struct { - With []*With `json:"with,omitempty"` - Terms interface{} `json:"terms"` - Index int `json:"index"` - Generated bool `json:"generated,omitempty"` - Negated bool `json:"negated,omitempty"` - Location *Location `json:"location,omitempty"` - - jsonOptions astJSON.Options - generatedFrom *Expr - generates []*Expr - } + Expr = v1.Expr // SomeDecl represents a variable declaration statement. The symbols are variables. - SomeDecl struct { - Symbols []*Term `json:"symbols"` - Location *Location `json:"location,omitempty"` + SomeDecl = v1.SomeDecl - jsonOptions astJSON.Options - } - - Every struct { - Key *Term `json:"key"` - Value *Term `json:"value"` - Domain *Term `json:"domain"` - Body Body `json:"body"` - Location *Location `json:"location,omitempty"` - - jsonOptions astJSON.Options - } + Every = v1.Every // With represents a modifier on an expression. 
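Because the declarations above are now type aliases rather than parallel definitions, values flow between the old and new packages without conversion. A minimal sketch, assuming both import paths from this bump; the `With` alias closes the type block below.

package main

import (
	"fmt"

	astv0 "github.com/open-policy-agent/opa/ast"
	astv1 "github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	mod := astv0.MustParseModule("package example\n\np := 1\n")

	// No conversion needed: *astv0.Module and *astv1.Module are the same type.
	var v1mod *astv1.Module = mod
	fmt.Println(v1mod.Package)
}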
- With struct { - Target *Term `json:"target"` - Value *Term `json:"value"` - Location *Location `json:"location,omitempty"` - - jsonOptions astJSON.Options - } + With = v1.With ) -// Compare returns an integer indicating whether mod is less than, equal to, -// or greater than other. -func (mod *Module) Compare(other *Module) int { - if mod == nil { - if other == nil { - return 0 - } - return -1 - } else if other == nil { - return 1 - } - if cmp := mod.Package.Compare(other.Package); cmp != 0 { - return cmp - } - if cmp := importsCompare(mod.Imports, other.Imports); cmp != 0 { - return cmp - } - if cmp := annotationsCompare(mod.Annotations, other.Annotations); cmp != 0 { - return cmp - } - return rulesCompare(mod.Rules, other.Rules) -} - -// Copy returns a deep copy of mod. -func (mod *Module) Copy() *Module { - cpy := *mod - cpy.Rules = make([]*Rule, len(mod.Rules)) - - nodes := make(map[Node]Node, len(mod.Rules)+len(mod.Imports)+1 /* package */) - - for i := range mod.Rules { - cpy.Rules[i] = mod.Rules[i].Copy() - cpy.Rules[i].Module = &cpy - nodes[mod.Rules[i]] = cpy.Rules[i] - } - - cpy.Imports = make([]*Import, len(mod.Imports)) - for i := range mod.Imports { - cpy.Imports[i] = mod.Imports[i].Copy() - nodes[mod.Imports[i]] = cpy.Imports[i] - } - - cpy.Package = mod.Package.Copy() - nodes[mod.Package] = cpy.Package - - cpy.Annotations = make([]*Annotations, len(mod.Annotations)) - for i, a := range mod.Annotations { - cpy.Annotations[i] = a.Copy(nodes[a.node]) - } - - cpy.Comments = make([]*Comment, len(mod.Comments)) - for i := range mod.Comments { - cpy.Comments[i] = mod.Comments[i].Copy() - } - - cpy.stmts = make([]Statement, len(mod.stmts)) - for i := range mod.stmts { - cpy.stmts[i] = nodes[mod.stmts[i]] - } - - return &cpy -} - -// Equal returns true if mod equals other. -func (mod *Module) Equal(other *Module) bool { - return mod.Compare(other) == 0 -} - -func (mod *Module) String() string { - byNode := map[Node][]*Annotations{} - for _, a := range mod.Annotations { - byNode[a.node] = append(byNode[a.node], a) - } - - appendAnnotationStrings := func(buf []string, node Node) []string { - if as, ok := byNode[node]; ok { - for i := range as { - buf = append(buf, "# METADATA") - buf = append(buf, "# "+as[i].String()) - } - } - return buf - } - - buf := []string{} - buf = appendAnnotationStrings(buf, mod.Package) - buf = append(buf, mod.Package.String()) - - if len(mod.Imports) > 0 { - buf = append(buf, "") - for _, imp := range mod.Imports { - buf = appendAnnotationStrings(buf, imp) - buf = append(buf, imp.String()) - } - } - if len(mod.Rules) > 0 { - buf = append(buf, "") - for _, rule := range mod.Rules { - buf = appendAnnotationStrings(buf, rule) - buf = append(buf, rule.stringWithOpts(toStringOpts{regoVersion: mod.regoVersion})) - } - } - return strings.Join(buf, "\n") -} - -// RuleSet returns a RuleSet containing named rules in the mod. -func (mod *Module) RuleSet(name Var) RuleSet { - rs := NewRuleSet() - for _, rule := range mod.Rules { - if rule.Head.Name.Equal(name) { - rs.Add(rule) - } - } - return rs -} - -// UnmarshalJSON parses bs and stores the result in mod. The rules in the module -// will have their module pointer set to mod. -func (mod *Module) UnmarshalJSON(bs []byte) error { - - // Declare a new type and use a type conversion to avoid recursively calling - // Module#UnmarshalJSON. 
-	type module Module
-
-	if err := util.UnmarshalJSON(bs, (*module)(mod)); err != nil {
-		return err
-	}
-
-	WalkRules(mod, func(rule *Rule) bool {
-		rule.Module = mod
-		return false
-	})
-
-	return nil
-}
-
-func (mod *Module) regoV1Compatible() bool {
-	return mod.regoVersion == RegoV1 || mod.regoVersion == RegoV0CompatV1
-}
-
-func (mod *Module) RegoVersion() RegoVersion {
-	return mod.regoVersion
-}
-
-// SetRegoVersion sets the RegoVersion for the module.
-// Note: Setting a rego-version that does not match the module's rego-version might have unintended consequences.
-func (mod *Module) SetRegoVersion(v RegoVersion) {
-	mod.regoVersion = v
-}
-
 // NewComment returns a new Comment object.
 func NewComment(text []byte) *Comment {
-	return &Comment{
-		Text: text,
-	}
-}
-
-// Loc returns the location of the comment in the definition.
-func (c *Comment) Loc() *Location {
-	if c == nil {
-		return nil
-	}
-	return c.Location
-}
-
-// SetLoc sets the location on c.
-func (c *Comment) SetLoc(loc *Location) {
-	c.Location = loc
-}
-
-func (c *Comment) String() string {
-	return "#" + string(c.Text)
-}
-
-// Copy returns a deep copy of c.
-func (c *Comment) Copy() *Comment {
-	cpy := *c
-	cpy.Text = make([]byte, len(c.Text))
-	copy(cpy.Text, c.Text)
-	return &cpy
-}
-
-// Equal returns true if this comment equals the other comment.
-// Unlike other equality checks on AST nodes, comment equality
-// depends on location.
-func (c *Comment) Equal(other *Comment) bool {
-	return c.Location.Equal(other.Location) && bytes.Equal(c.Text, other.Text)
-}
-
-func (c *Comment) setJSONOptions(opts astJSON.Options) {
-	// Note: this is not used for location since Comments use default JSON marshaling
-	// behavior with struct field names in JSON.
-	c.jsonOptions = opts
-	if c.Location != nil {
-		c.Location.JSONOptions = opts
-	}
-}
-
-// Compare returns an integer indicating whether pkg is less than, equal to,
-// or greater than other.
-func (pkg *Package) Compare(other *Package) int {
-	return Compare(pkg.Path, other.Path)
-}
-
-// Copy returns a deep copy of pkg.
-func (pkg *Package) Copy() *Package {
-	cpy := *pkg
-	cpy.Path = pkg.Path.Copy()
-	return &cpy
-}
-
-// Equal returns true if pkg is equal to other.
-func (pkg *Package) Equal(other *Package) bool {
-	return pkg.Compare(other) == 0
-}
-
-// Loc returns the location of the Package in the definition.
-func (pkg *Package) Loc() *Location {
-	if pkg == nil {
-		return nil
-	}
-	return pkg.Location
-}
-
-// SetLoc sets the location on pkg.
-func (pkg *Package) SetLoc(loc *Location) {
-	pkg.Location = loc
-}
-
-func (pkg *Package) String() string {
-	if pkg == nil {
-		return "<illegal nil package>"
-	} else if len(pkg.Path) <= 1 {
-		return fmt.Sprintf("package <illegal path %q>", pkg.Path)
-	}
-	// Omit head as all packages have the DefaultRootDocument prepended at parse time.
-	path := make(Ref, len(pkg.Path)-1)
-	path[0] = VarTerm(string(pkg.Path[1].Value.(String)))
-	copy(path[1:], pkg.Path[2:])
-	return fmt.Sprintf("package %v", path)
-}
-
-func (pkg *Package) setJSONOptions(opts astJSON.Options) {
-	pkg.jsonOptions = opts
-	if pkg.Location != nil {
-		pkg.Location.JSONOptions = opts
-	}
-}
-
-func (pkg *Package) MarshalJSON() ([]byte, error) {
-	data := map[string]interface{}{
-		"path": pkg.Path,
-	}
-
-	if pkg.jsonOptions.MarshalOptions.IncludeLocation.Package {
-		if pkg.Location != nil {
-			data["location"] = pkg.Location
-		}
-	}
-
-	return json.Marshal(data)
+	return v1.NewComment(text)
 }
 
 // IsValidImportPath returns an error indicating if the import path is invalid.
// If the import path is valid, err is nil. func IsValidImportPath(v Value) (err error) { - switch v := v.(type) { - case Var: - if !v.Equal(DefaultRootDocument.Value) && !v.Equal(InputRootDocument.Value) { - return fmt.Errorf("invalid path %v: path must begin with input or data", v) - } - case Ref: - if err := IsValidImportPath(v[0].Value); err != nil { - return fmt.Errorf("invalid path %v: path must begin with input or data", v) - } - for _, e := range v[1:] { - if _, ok := e.Value.(String); !ok { - return fmt.Errorf("invalid path %v: path elements must be strings", v) - } - } - default: - return fmt.Errorf("invalid path %v: path must be ref or var", v) - } - return nil -} - -// Compare returns an integer indicating whether imp is less than, equal to, -// or greater than other. -func (imp *Import) Compare(other *Import) int { - if imp == nil { - if other == nil { - return 0 - } - return -1 - } else if other == nil { - return 1 - } - if cmp := Compare(imp.Path, other.Path); cmp != 0 { - return cmp - } - return Compare(imp.Alias, other.Alias) -} - -// Copy returns a deep copy of imp. -func (imp *Import) Copy() *Import { - cpy := *imp - cpy.Path = imp.Path.Copy() - return &cpy -} - -// Equal returns true if imp is equal to other. -func (imp *Import) Equal(other *Import) bool { - return imp.Compare(other) == 0 -} - -// Loc returns the location of the Import in the definition. -func (imp *Import) Loc() *Location { - if imp == nil { - return nil - } - return imp.Location -} - -// SetLoc sets the location on imp. -func (imp *Import) SetLoc(loc *Location) { - imp.Location = loc -} - -// Name returns the variable that is used to refer to the imported virtual -// document. This is the alias if defined otherwise the last element in the -// path. -func (imp *Import) Name() Var { - if len(imp.Alias) != 0 { - return imp.Alias - } - switch v := imp.Path.Value.(type) { - case Var: - return v - case Ref: - if len(v) == 1 { - return v[0].Value.(Var) - } - return Var(v[len(v)-1].Value.(String)) - } - panic("illegal import") -} - -func (imp *Import) String() string { - buf := []string{"import", imp.Path.String()} - if len(imp.Alias) > 0 { - buf = append(buf, "as "+imp.Alias.String()) - } - return strings.Join(buf, " ") -} - -func (imp *Import) setJSONOptions(opts astJSON.Options) { - imp.jsonOptions = opts - if imp.Location != nil { - imp.Location.JSONOptions = opts - } -} - -func (imp *Import) MarshalJSON() ([]byte, error) { - data := map[string]interface{}{ - "path": imp.Path, - } - - if len(imp.Alias) != 0 { - data["alias"] = imp.Alias - } - - if imp.jsonOptions.MarshalOptions.IncludeLocation.Import { - if imp.Location != nil { - data["location"] = imp.Location - } - } - - return json.Marshal(data) -} - -// Compare returns an integer indicating whether rule is less than, equal to, -// or greater than other. -func (rule *Rule) Compare(other *Rule) int { - if rule == nil { - if other == nil { - return 0 - } - return -1 - } else if other == nil { - return 1 - } - if cmp := rule.Head.Compare(other.Head); cmp != 0 { - return cmp - } - if cmp := util.Compare(rule.Default, other.Default); cmp != 0 { - return cmp - } - if cmp := rule.Body.Compare(other.Body); cmp != 0 { - return cmp - } - - if cmp := annotationsCompare(rule.Annotations, other.Annotations); cmp != 0 { - return cmp - } - - return rule.Else.Compare(other.Else) -} - -// Copy returns a deep copy of rule. 
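
// ---- editorial sketch (added in review; not part of the vendored patch) ----
// The Import.Name logic deleted above (alias if set, otherwise the last path
// element) still applies through the v1 alias, as does IsValidImportPath.
// MustParseTerm is a parser helper from this package not shown in this hunk;
// the values are hypothetical:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	imp := &ast.Import{Path: ast.MustParseTerm("data.servers"), Alias: ast.Var("s")}
	fmt.Println(imp.Name())                             // s (the alias wins)
	fmt.Println(ast.IsValidImportPath(imp.Path.Value))  // nil: begins with data
	fmt.Println(ast.IsValidImportPath(ast.String("x"))) // error: must be ref or var
}

// ---- end sketch ----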
-func (rule *Rule) Copy() *Rule { - cpy := *rule - cpy.Head = rule.Head.Copy() - cpy.Body = rule.Body.Copy() - - cpy.Annotations = make([]*Annotations, len(rule.Annotations)) - for i, a := range rule.Annotations { - cpy.Annotations[i] = a.Copy(&cpy) - } - - if cpy.Else != nil { - cpy.Else = rule.Else.Copy() - } - return &cpy -} - -// Equal returns true if rule is equal to other. -func (rule *Rule) Equal(other *Rule) bool { - return rule.Compare(other) == 0 -} - -// Loc returns the location of the Rule in the definition. -func (rule *Rule) Loc() *Location { - if rule == nil { - return nil - } - return rule.Location -} - -// SetLoc sets the location on rule. -func (rule *Rule) SetLoc(loc *Location) { - rule.Location = loc -} - -// Path returns a ref referring to the document produced by this rule. If rule -// is not contained in a module, this function panics. -// Deprecated: Poor handling of ref rules. Use `(*Rule).Ref()` instead. -func (rule *Rule) Path() Ref { - if rule.Module == nil { - panic("assertion failed") - } - return rule.Module.Package.Path.Extend(rule.Head.Ref().GroundPrefix()) -} - -// Ref returns a ref referring to the document produced by this rule. If rule -// is not contained in a module, this function panics. The returned ref may -// contain variables in the last position. -func (rule *Rule) Ref() Ref { - if rule.Module == nil { - panic("assertion failed") - } - return rule.Module.Package.Path.Extend(rule.Head.Ref()) -} - -func (rule *Rule) String() string { - return rule.stringWithOpts(toStringOpts{}) -} - -type toStringOpts struct { - regoVersion RegoVersion -} - -func (rule *Rule) stringWithOpts(opts toStringOpts) string { - buf := []string{} - if rule.Default { - buf = append(buf, "default") - } - buf = append(buf, rule.Head.stringWithOpts(opts)) - if !rule.Default { - switch opts.regoVersion { - case RegoV1, RegoV0CompatV1: - buf = append(buf, "if") - } - buf = append(buf, "{") - buf = append(buf, rule.Body.String()) - buf = append(buf, "}") - } - if rule.Else != nil { - buf = append(buf, rule.Else.elseString(opts)) - } - return strings.Join(buf, " ") -} - -func (rule *Rule) isFunction() bool { - return len(rule.Head.Args) > 0 -} - -func (rule *Rule) setJSONOptions(opts astJSON.Options) { - rule.jsonOptions = opts - if rule.Location != nil { - rule.Location.JSONOptions = opts - } -} - -func (rule *Rule) MarshalJSON() ([]byte, error) { - data := map[string]interface{}{ - "head": rule.Head, - "body": rule.Body, - } - - if rule.Default { - data["default"] = true - } - - if rule.Else != nil { - data["else"] = rule.Else - } - - if rule.jsonOptions.MarshalOptions.IncludeLocation.Rule { - if rule.Location != nil { - data["location"] = rule.Location - } - } - - if len(rule.Annotations) != 0 { - data["annotations"] = rule.Annotations - } - - return json.Marshal(data) -} - -func (rule *Rule) elseString(opts toStringOpts) string { - var buf []string - - buf = append(buf, "else") - - value := rule.Head.Value - if value != nil { - buf = append(buf, "=") - buf = append(buf, value.String()) - } - - switch opts.regoVersion { - case RegoV1, RegoV0CompatV1: - buf = append(buf, "if") - } - - buf = append(buf, "{") - buf = append(buf, rule.Body.String()) - buf = append(buf, "}") - - if rule.Else != nil { - buf = append(buf, rule.Else.elseString(opts)) - } - - return strings.Join(buf, " ") + return v1.IsValidImportPath(v) } // NewHead returns a new Head object. If args are provided, the first will be // used for the key and the second will be used for the value. 
func NewHead(name Var, args ...*Term) *Head { - head := &Head{ - Name: name, // backcompat - Reference: []*Term{NewTerm(name)}, - } - if len(args) == 0 { - return head - } - head.Key = args[0] - if len(args) == 1 { - return head - } - head.Value = args[1] - if head.Key != nil && head.Value != nil { - head.Reference = head.Reference.Append(args[0]) - } - return head + return v1.NewHead(name, args...) } // VarHead creates a head object, initializes its Name, Location, and Options, // and returns the new head. func VarHead(name Var, location *Location, jsonOpts *astJSON.Options) *Head { - h := NewHead(name) - h.Reference[0].Location = location - if jsonOpts != nil { - h.Reference[0].setJSONOptions(*jsonOpts) - } - return h + return v1.VarHead(name, location, jsonOpts) } // RefHead returns a new Head object with the passed Ref. If args are provided, // the first will be used for the value. func RefHead(ref Ref, args ...*Term) *Head { - head := &Head{} - head.SetRef(ref) - if len(ref) < 2 { - head.Name = ref[0].Value.(Var) - } - if len(args) >= 1 { - head.Value = args[0] - } - return head + return v1.RefHead(ref, args...) } // DocKind represents the collection of document types that can be produced by rules. -type DocKind int +type DocKind = v1.DocKind const ( // CompleteDoc represents a document that is completely defined by the rule. - CompleteDoc = iota + CompleteDoc = v1.CompleteDoc // PartialSetDoc represents a set document that is partially defined by the rule. - PartialSetDoc + PartialSetDoc = v1.PartialSetDoc // PartialObjectDoc represents an object document that is partially defined by the rule. - PartialObjectDoc -) // TODO(sr): Deprecate? - -// DocKind returns the type of document produced by this rule. -func (head *Head) DocKind() DocKind { - if head.Key != nil { - if head.Value != nil { - return PartialObjectDoc - } - return PartialSetDoc - } - return CompleteDoc -} + PartialObjectDoc = v1.PartialObjectDoc +) -type RuleKind int +type RuleKind = v1.RuleKind const ( - SingleValue = iota - MultiValue + SingleValue = v1.SingleValue + MultiValue = v1.MultiValue ) -// RuleKind returns the type of rule this is -func (head *Head) RuleKind() RuleKind { - // NOTE(sr): This is bit verbose, since the key is irrelevant for single vs - // multi value, but as good a spot as to assert the invariant. - switch { - case head.Value != nil: - return SingleValue - case head.Key != nil: - return MultiValue - default: - panic("unreachable") - } -} - -// Ref returns the Ref of the rule. If it doesn't have one, it's filled in -// via the Head's Name. -func (head *Head) Ref() Ref { - if len(head.Reference) > 0 { - return head.Reference - } - return Ref{&Term{Value: head.Name}} -} - -// SetRef can be used to set a rule head's Reference -func (head *Head) SetRef(r Ref) { - head.Reference = r -} - -// Compare returns an integer indicating whether head is less than, equal to, -// or greater than other. 
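
// ---- editorial sketch (added in review; not part of the vendored patch) ----
// NewHead treats a single extra argument as the Key (a partial set) and two as
// Key and Value; RefHead builds heads for ref rules. Assuming the v1 type
// keeps the DocKind accessor, as the aliased constants above suggest:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	// p contains "x": the single argument becomes the Key.
	partial := ast.NewHead(ast.Var("p"), ast.StringTerm("x"))
	fmt.Println(partial.DocKind() == ast.PartialSetDoc) // true

	// q.r = 1: a ref head whose first extra argument is the Value.
	complete := ast.RefHead(ast.MustParseRef("q.r"), ast.IntNumberTerm(1))
	fmt.Println(complete.DocKind() == ast.CompleteDoc) // true
}

// ---- end sketch ----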
-func (head *Head) Compare(other *Head) int { - if head == nil { - if other == nil { - return 0 - } - return -1 - } else if other == nil { - return 1 - } - if head.Assign && !other.Assign { - return -1 - } else if !head.Assign && other.Assign { - return 1 - } - if cmp := Compare(head.Args, other.Args); cmp != 0 { - return cmp - } - if cmp := Compare(head.Reference, other.Reference); cmp != 0 { - return cmp - } - if cmp := Compare(head.Name, other.Name); cmp != 0 { - return cmp - } - if cmp := Compare(head.Key, other.Key); cmp != 0 { - return cmp - } - return Compare(head.Value, other.Value) -} - -// Copy returns a deep copy of head. -func (head *Head) Copy() *Head { - cpy := *head - cpy.Reference = head.Reference.Copy() - cpy.Args = head.Args.Copy() - cpy.Key = head.Key.Copy() - cpy.Value = head.Value.Copy() - cpy.keywords = nil - return &cpy -} - -// Equal returns true if this head equals other. -func (head *Head) Equal(other *Head) bool { - return head.Compare(other) == 0 -} - -func (head *Head) String() string { - return head.stringWithOpts(toStringOpts{}) -} - -func (head *Head) stringWithOpts(opts toStringOpts) string { - buf := strings.Builder{} - buf.WriteString(head.Ref().String()) - containsAdded := false - - switch { - case len(head.Args) != 0: - buf.WriteString(head.Args.String()) - case len(head.Reference) == 1 && head.Key != nil: - switch opts.regoVersion { - case RegoV0: - buf.WriteRune('[') - buf.WriteString(head.Key.String()) - buf.WriteRune(']') - default: - containsAdded = true - buf.WriteString(" contains ") - buf.WriteString(head.Key.String()) - } - } - if head.Value != nil { - if head.Assign { - buf.WriteString(" := ") - } else { - buf.WriteString(" = ") - } - buf.WriteString(head.Value.String()) - } else if !containsAdded && head.Name == "" && head.Key != nil { - buf.WriteString(" contains ") - buf.WriteString(head.Key.String()) - } - return buf.String() -} - -func (head *Head) setJSONOptions(opts astJSON.Options) { - head.jsonOptions = opts - if head.Location != nil { - head.Location.JSONOptions = opts - } -} - -func (head *Head) MarshalJSON() ([]byte, error) { - var loc *Location - includeLoc := head.jsonOptions.MarshalOptions.IncludeLocation - if includeLoc.Head { - if head.Location != nil { - loc = head.Location - } - - for _, term := range head.Reference { - if term.Location != nil { - term.jsonOptions.MarshalOptions.IncludeLocation.Term = includeLoc.Term - } - } - } - - // NOTE(sr): we do this to override the rendering of `head.Reference`. - // It's still what'll be used via the default means of encoding/json - // for unmarshaling a json object into a Head struct! - type h Head - return json.Marshal(struct { - h - Ref Ref `json:"ref"` - Location *Location `json:"location,omitempty"` - }{ - h: h(*head), - Ref: head.Ref(), - Location: loc, - }) -} - -// Vars returns a set of vars found in the head. -func (head *Head) Vars() VarSet { - vis := &VarVisitor{vars: VarSet{}} - // TODO: improve test coverage for this. - if head.Args != nil { - vis.Walk(head.Args) - } - if head.Key != nil { - vis.Walk(head.Key) - } - if head.Value != nil { - vis.Walk(head.Value) - } - if len(head.Reference) > 0 { - vis.Walk(head.Reference[1:]) - } - return vis.vars -} - -// Loc returns the Location of head. -func (head *Head) Loc() *Location { - if head == nil { - return nil - } - return head.Location -} - -// SetLoc sets the location on head. 
-func (head *Head) SetLoc(loc *Location) { - head.Location = loc -} - -func (head *Head) HasDynamicRef() bool { - pos := head.Reference.Dynamic() - // Ref is dynamic if it has one non-constant term that isn't the first or last term or if it's a partial set rule. - return pos > 0 && (pos < len(head.Reference)-1 || head.RuleKind() == MultiValue) -} - -// Copy returns a deep copy of a. -func (a Args) Copy() Args { - cpy := Args{} - for _, t := range a { - cpy = append(cpy, t.Copy()) - } - return cpy -} - -func (a Args) String() string { - buf := make([]string, 0, len(a)) - for _, t := range a { - buf = append(buf, t.String()) - } - return "(" + strings.Join(buf, ", ") + ")" -} - -// Loc returns the Location of a. -func (a Args) Loc() *Location { - if len(a) == 0 { - return nil - } - return a[0].Location -} - -// SetLoc sets the location on a. -func (a Args) SetLoc(loc *Location) { - if len(a) != 0 { - a[0].SetLocation(loc) - } -} - -// Vars returns a set of vars that appear in a. -func (a Args) Vars() VarSet { - vis := &VarVisitor{vars: VarSet{}} - vis.Walk(a) - return vis.vars -} - // NewBody returns a new Body containing the given expressions. The indices of // the immediate expressions will be reset. func NewBody(exprs ...*Expr) Body { - for i, expr := range exprs { - expr.Index = i - } - return Body(exprs) -} - -// MarshalJSON returns JSON encoded bytes representing body. -func (body Body) MarshalJSON() ([]byte, error) { - // Serialize empty Body to empty array. This handles both the empty case and the - // nil case (whereas by default the result would be null if body was nil.) - if len(body) == 0 { - return []byte(`[]`), nil - } - ret, err := json.Marshal([]*Expr(body)) - return ret, err -} - -// Append adds the expr to the body and updates the expr's index accordingly. -func (body *Body) Append(expr *Expr) { - n := len(*body) - expr.Index = n - *body = append(*body, expr) -} - -// Set sets the expr in the body at the specified position and updates the -// expr's index accordingly. -func (body Body) Set(expr *Expr, pos int) { - body[pos] = expr - expr.Index = pos -} - -// Compare returns an integer indicating whether body is less than, equal to, -// or greater than other. -// -// If body is a subset of other, it is considered less than (and vice versa). -func (body Body) Compare(other Body) int { - minLen := len(body) - if len(other) < minLen { - minLen = len(other) - } - for i := 0; i < minLen; i++ { - if cmp := body[i].Compare(other[i]); cmp != 0 { - return cmp - } - } - if len(body) < len(other) { - return -1 - } - if len(other) < len(body) { - return 1 - } - return 0 -} - -// Copy returns a deep copy of body. -func (body Body) Copy() Body { - cpy := make(Body, len(body)) - for i := range body { - cpy[i] = body[i].Copy() - } - return cpy -} - -// Contains returns true if this body contains the given expression. -func (body Body) Contains(x *Expr) bool { - for _, e := range body { - if e.Equal(x) { - return true - } - } - return false -} - -// Equal returns true if this Body is equal to the other Body. -func (body Body) Equal(other Body) bool { - return body.Compare(other) == 0 -} - -// Hash returns the hash code for the Body. -func (body Body) Hash() int { - s := 0 - for _, e := range body { - s += e.Hash() - } - return s -} - -// IsGround returns true if all of the expressions in the Body are ground. -func (body Body) IsGround() bool { - for _, e := range body { - if !e.IsGround() { - return false - } - } - return true -} - -// Loc returns the location of the Body in the definition. 
-func (body Body) Loc() *Location { - if len(body) == 0 { - return nil - } - return body[0].Location -} - -// SetLoc sets the location on body. -func (body Body) SetLoc(loc *Location) { - if len(body) != 0 { - body[0].SetLocation(loc) - } -} - -func (body Body) String() string { - buf := make([]string, 0, len(body)) - for _, v := range body { - buf = append(buf, v.String()) - } - return strings.Join(buf, "; ") -} - -// Vars returns a VarSet containing variables in body. The params can be set to -// control which vars are included. -func (body Body) Vars(params VarVisitorParams) VarSet { - vis := NewVarVisitor().WithParams(params) - vis.Walk(body) - return vis.Vars() + return v1.NewBody(exprs...) } // NewExpr returns a new Expr object. func NewExpr(terms interface{}) *Expr { - switch terms.(type) { - case *SomeDecl, *Every, *Term, []*Term: // ok - default: - panic("unreachable") - } - return &Expr{ - Negated: false, - Terms: terms, - Index: 0, - With: nil, - } -} - -// Complement returns a copy of this expression with the negation flag flipped. -func (expr *Expr) Complement() *Expr { - cpy := *expr - cpy.Negated = !cpy.Negated - return &cpy -} - -// Equal returns true if this Expr equals the other Expr. -func (expr *Expr) Equal(other *Expr) bool { - return expr.Compare(other) == 0 -} - -// Compare returns an integer indicating whether expr is less than, equal to, -// or greater than other. -// -// Expressions are compared as follows: -// -// 1. Declarations are always less than other expressions. -// 2. Preceding expression (by Index) is always less than the other expression. -// 3. Non-negated expressions are always less than negated expressions. -// 4. Single term expressions are always less than built-in expressions. -// -// Otherwise, the expression terms are compared normally. If both expressions -// have the same terms, the modifiers are compared. -func (expr *Expr) Compare(other *Expr) int { - - if expr == nil { - if other == nil { - return 0 - } - return -1 - } else if other == nil { - return 1 - } - - o1 := expr.sortOrder() - o2 := other.sortOrder() - if o1 < o2 { - return -1 - } else if o2 < o1 { - return 1 - } - - switch { - case expr.Index < other.Index: - return -1 - case expr.Index > other.Index: - return 1 - } - - switch { - case expr.Negated && !other.Negated: - return 1 - case !expr.Negated && other.Negated: - return -1 - } - - switch t := expr.Terms.(type) { - case *Term: - if cmp := Compare(t.Value, other.Terms.(*Term).Value); cmp != 0 { - return cmp - } - case []*Term: - if cmp := termSliceCompare(t, other.Terms.([]*Term)); cmp != 0 { - return cmp - } - case *SomeDecl: - if cmp := Compare(t, other.Terms.(*SomeDecl)); cmp != 0 { - return cmp - } - case *Every: - if cmp := Compare(t, other.Terms.(*Every)); cmp != 0 { - return cmp - } - } - - return withSliceCompare(expr.With, other.With) -} - -func (expr *Expr) sortOrder() int { - switch expr.Terms.(type) { - case *SomeDecl: - return 0 - case *Term: - return 1 - case []*Term: - return 2 - case *Every: - return 3 - } - return -1 -} - -// CopyWithoutTerms returns a deep copy of expr without its Terms -func (expr *Expr) CopyWithoutTerms() *Expr { - cpy := *expr - - cpy.With = make([]*With, len(expr.With)) - for i := range expr.With { - cpy.With[i] = expr.With[i].Copy() - } - - return &cpy -} - -// Copy returns a deep copy of expr. 
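
// ---- editorial sketch (added in review; not part of the vendored patch) ----
// NewBody re-indexes its expressions, and NewBuiltinExpr (shimmed further
// below) expects the operator term first. Hand-building eq(x, 1), which the
// deleted String logic renders as the infix form `x = 1`; RefTerm and VarTerm
// are term constructors from this package:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	expr := ast.NewBuiltinExpr(
		ast.RefTerm(ast.VarTerm("eq")), // the operator comes first
		ast.VarTerm("x"),
		ast.IntNumberTerm(1),
	)
	body := ast.NewBody(expr) // resets each expression's Index
	fmt.Println(body)         // expected: x = 1
}

// ---- end sketch ----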
-func (expr *Expr) Copy() *Expr { - - cpy := expr.CopyWithoutTerms() - - switch ts := expr.Terms.(type) { - case *SomeDecl: - cpy.Terms = ts.Copy() - case []*Term: - cpyTs := make([]*Term, len(ts)) - for i := range ts { - cpyTs[i] = ts[i].Copy() - } - cpy.Terms = cpyTs - case *Term: - cpy.Terms = ts.Copy() - case *Every: - cpy.Terms = ts.Copy() - } - - return cpy -} - -// Hash returns the hash code of the Expr. -func (expr *Expr) Hash() int { - s := expr.Index - switch ts := expr.Terms.(type) { - case *SomeDecl: - s += ts.Hash() - case []*Term: - for _, t := range ts { - s += t.Value.Hash() - } - case *Term: - s += ts.Value.Hash() - } - if expr.Negated { - s++ - } - for _, w := range expr.With { - s += w.Hash() - } - return s -} - -// IncludeWith returns a copy of expr with the with modifier appended. -func (expr *Expr) IncludeWith(target *Term, value *Term) *Expr { - cpy := *expr - cpy.With = append(cpy.With, &With{Target: target, Value: value}) - return &cpy -} - -// NoWith returns a copy of expr where the with modifier has been removed. -func (expr *Expr) NoWith() *Expr { - cpy := *expr - cpy.With = nil - return &cpy -} - -// IsEquality returns true if this is an equality expression. -func (expr *Expr) IsEquality() bool { - return isGlobalBuiltin(expr, Var(Equality.Name)) -} - -// IsAssignment returns true if this an assignment expression. -func (expr *Expr) IsAssignment() bool { - return isGlobalBuiltin(expr, Var(Assign.Name)) -} - -// IsCall returns true if this expression calls a function. -func (expr *Expr) IsCall() bool { - _, ok := expr.Terms.([]*Term) - return ok -} - -// IsEvery returns true if this expression is an 'every' expression. -func (expr *Expr) IsEvery() bool { - _, ok := expr.Terms.(*Every) - return ok -} - -// IsSome returns true if this expression is a 'some' expression. -func (expr *Expr) IsSome() bool { - _, ok := expr.Terms.(*SomeDecl) - return ok -} - -// Operator returns the name of the function or built-in this expression refers -// to. If this expression is not a function call, returns nil. -func (expr *Expr) Operator() Ref { - op := expr.OperatorTerm() - if op == nil { - return nil - } - return op.Value.(Ref) -} - -// OperatorTerm returns the name of the function or built-in this expression -// refers to. If this expression is not a function call, returns nil. -func (expr *Expr) OperatorTerm() *Term { - terms, ok := expr.Terms.([]*Term) - if !ok || len(terms) == 0 { - return nil - } - return terms[0] -} - -// Operand returns the term at the zero-based pos. If the expr does not include -// at least pos+1 terms, this function returns nil. -func (expr *Expr) Operand(pos int) *Term { - terms, ok := expr.Terms.([]*Term) - if !ok { - return nil - } - idx := pos + 1 - if idx < len(terms) { - return terms[idx] - } - return nil -} - -// Operands returns the built-in function operands. -func (expr *Expr) Operands() []*Term { - terms, ok := expr.Terms.([]*Term) - if !ok { - return nil - } - return terms[1:] -} - -// IsGround returns true if all of the expression terms are ground. -func (expr *Expr) IsGround() bool { - switch ts := expr.Terms.(type) { - case []*Term: - for _, t := range ts[1:] { - if !t.IsGround() { - return false - } - } - case *Term: - return ts.IsGround() - } - return true -} - -// SetOperator sets the expr's operator and returns the expr itself. If expr is -// not a call expr, this function will panic. 
-func (expr *Expr) SetOperator(term *Term) *Expr { - expr.Terms.([]*Term)[0] = term - return expr -} - -// SetLocation sets the expr's location and returns the expr itself. -func (expr *Expr) SetLocation(loc *Location) *Expr { - expr.Location = loc - return expr -} - -// Loc returns the Location of expr. -func (expr *Expr) Loc() *Location { - if expr == nil { - return nil - } - return expr.Location -} - -// SetLoc sets the location on expr. -func (expr *Expr) SetLoc(loc *Location) { - expr.SetLocation(loc) -} - -func (expr *Expr) String() string { - buf := make([]string, 0, 2+len(expr.With)) - if expr.Negated { - buf = append(buf, "not") - } - switch t := expr.Terms.(type) { - case []*Term: - if expr.IsEquality() && validEqAssignArgCount(expr) { - buf = append(buf, fmt.Sprintf("%v %v %v", t[1], Equality.Infix, t[2])) - } else { - buf = append(buf, Call(t).String()) - } - case fmt.Stringer: - buf = append(buf, t.String()) - } - - for i := range expr.With { - buf = append(buf, expr.With[i].String()) - } - - return strings.Join(buf, " ") -} - -func (expr *Expr) setJSONOptions(opts astJSON.Options) { - expr.jsonOptions = opts - if expr.Location != nil { - expr.Location.JSONOptions = opts - } -} - -func (expr *Expr) MarshalJSON() ([]byte, error) { - data := map[string]interface{}{ - "terms": expr.Terms, - "index": expr.Index, - } - - if len(expr.With) > 0 { - data["with"] = expr.With - } - - if expr.Generated { - data["generated"] = true - } - - if expr.Negated { - data["negated"] = true - } - - if expr.jsonOptions.MarshalOptions.IncludeLocation.Expr { - if expr.Location != nil { - data["location"] = expr.Location - } - } - - return json.Marshal(data) -} - -// UnmarshalJSON parses the byte array and stores the result in expr. -func (expr *Expr) UnmarshalJSON(bs []byte) error { - v := map[string]interface{}{} - if err := util.UnmarshalJSON(bs, &v); err != nil { - return err - } - return unmarshalExpr(expr, v) -} - -// Vars returns a VarSet containing variables in expr. The params can be set to -// control which vars are included. -func (expr *Expr) Vars(params VarVisitorParams) VarSet { - vis := NewVarVisitor().WithParams(params) - vis.Walk(expr) - return vis.Vars() + return v1.NewExpr(terms) } // NewBuiltinExpr creates a new Expr object with the supplied terms. // The builtin operator must be the first term. 
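
// ---- editorial sketch (added in review; not part of the vendored patch) ----
// The Expr accessors deleted above (Index, Negated, IsAssignment, and friends)
// survive on the aliased v1 type. Inspecting a parsed body; MustParseBody is a
// parser helper from this package, and the printed output is what the v0
// semantics shown here would give:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	body := ast.MustParseBody(`x := 7; not input.deny`)
	for _, expr := range body {
		fmt.Println(expr.Index, expr.Negated, expr.IsAssignment())
	}
	// 0 false true
	// 1 true false
}

// ---- end sketch ----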
func NewBuiltinExpr(terms ...*Term) *Expr { - return &Expr{Terms: terms} -} - -func (expr *Expr) CogeneratedExprs() []*Expr { - visited := map[*Expr]struct{}{} - visitCogeneratedExprs(expr, func(e *Expr) bool { - if expr.Equal(e) { - return true - } - if _, ok := visited[e]; ok { - return true - } - visited[e] = struct{}{} - return false - }) - - result := make([]*Expr, 0, len(visited)) - for e := range visited { - result = append(result, e) - } - return result -} - -func (expr *Expr) BaseCogeneratedExpr() *Expr { - if expr.generatedFrom == nil { - return expr - } - return expr.generatedFrom.BaseCogeneratedExpr() -} - -func visitCogeneratedExprs(expr *Expr, f func(*Expr) bool) { - if parent := expr.generatedFrom; parent != nil { - if stop := f(parent); !stop { - visitCogeneratedExprs(parent, f) - } - } - for _, child := range expr.generates { - if stop := f(child); !stop { - visitCogeneratedExprs(child, f) - } - } -} - -func (d *SomeDecl) String() string { - if call, ok := d.Symbols[0].Value.(Call); ok { - if len(call) == 4 { - return "some " + call[1].String() + ", " + call[2].String() + " in " + call[3].String() - } - return "some " + call[1].String() + " in " + call[2].String() - } - buf := make([]string, len(d.Symbols)) - for i := range buf { - buf[i] = d.Symbols[i].String() - } - return "some " + strings.Join(buf, ", ") -} - -// SetLoc sets the Location on d. -func (d *SomeDecl) SetLoc(loc *Location) { - d.Location = loc -} - -// Loc returns the Location of d. -func (d *SomeDecl) Loc() *Location { - return d.Location -} - -// Copy returns a deep copy of d. -func (d *SomeDecl) Copy() *SomeDecl { - cpy := *d - cpy.Symbols = termSliceCopy(d.Symbols) - return &cpy -} - -// Compare returns an integer indicating whether d is less than, equal to, or -// greater than other. -func (d *SomeDecl) Compare(other *SomeDecl) int { - return termSliceCompare(d.Symbols, other.Symbols) -} - -// Hash returns a hash code of d. -func (d *SomeDecl) Hash() int { - return termSliceHash(d.Symbols) -} - -func (d *SomeDecl) setJSONOptions(opts astJSON.Options) { - d.jsonOptions = opts - if d.Location != nil { - d.Location.JSONOptions = opts - } -} - -func (d *SomeDecl) MarshalJSON() ([]byte, error) { - data := map[string]interface{}{ - "symbols": d.Symbols, - } - - if d.jsonOptions.MarshalOptions.IncludeLocation.SomeDecl { - if d.Location != nil { - data["location"] = d.Location - } - } - - return json.Marshal(data) -} - -func (q *Every) String() string { - if q.Key != nil { - return fmt.Sprintf("every %s, %s in %s { %s }", - q.Key, - q.Value, - q.Domain, - q.Body) - } - return fmt.Sprintf("every %s in %s { %s }", - q.Value, - q.Domain, - q.Body) -} - -func (q *Every) Loc() *Location { - return q.Location -} - -func (q *Every) SetLoc(l *Location) { - q.Location = l -} - -// Copy returns a deep copy of d. -func (q *Every) Copy() *Every { - cpy := *q - cpy.Key = q.Key.Copy() - cpy.Value = q.Value.Copy() - cpy.Domain = q.Domain.Copy() - cpy.Body = q.Body.Copy() - return &cpy -} - -func (q *Every) Compare(other *Every) int { - for _, terms := range [][2]*Term{ - {q.Key, other.Key}, - {q.Value, other.Value}, - {q.Domain, other.Domain}, - } { - if d := Compare(terms[0], terms[1]); d != 0 { - return d - } - } - return q.Body.Compare(other.Body) -} - -// KeyValueVars returns the key and val arguments of an `every` -// expression, if they are non-nil and not wildcards. 
-func (q *Every) KeyValueVars() VarSet { - vis := &VarVisitor{vars: VarSet{}} - if q.Key != nil { - vis.Walk(q.Key) - } - vis.Walk(q.Value) - return vis.vars -} - -func (q *Every) setJSONOptions(opts astJSON.Options) { - q.jsonOptions = opts - if q.Location != nil { - q.Location.JSONOptions = opts - } -} - -func (q *Every) MarshalJSON() ([]byte, error) { - data := map[string]interface{}{ - "key": q.Key, - "value": q.Value, - "domain": q.Domain, - "body": q.Body, - } - - if q.jsonOptions.MarshalOptions.IncludeLocation.Every { - if q.Location != nil { - data["location"] = q.Location - } - } - - return json.Marshal(data) -} - -func (w *With) String() string { - return "with " + w.Target.String() + " as " + w.Value.String() -} - -// Equal returns true if this With is equals the other With. -func (w *With) Equal(other *With) bool { - return Compare(w, other) == 0 -} - -// Compare returns an integer indicating whether w is less than, equal to, or -// greater than other. -func (w *With) Compare(other *With) int { - if w == nil { - if other == nil { - return 0 - } - return -1 - } else if other == nil { - return 1 - } - if cmp := Compare(w.Target, other.Target); cmp != 0 { - return cmp - } - return Compare(w.Value, other.Value) -} - -// Copy returns a deep copy of w. -func (w *With) Copy() *With { - cpy := *w - cpy.Value = w.Value.Copy() - cpy.Target = w.Target.Copy() - return &cpy -} - -// Hash returns the hash code of the With. -func (w With) Hash() int { - return w.Target.Hash() + w.Value.Hash() -} - -// SetLocation sets the location on w. -func (w *With) SetLocation(loc *Location) *With { - w.Location = loc - return w -} - -// Loc returns the Location of w. -func (w *With) Loc() *Location { - if w == nil { - return nil - } - return w.Location -} - -// SetLoc sets the location on w. -func (w *With) SetLoc(loc *Location) { - w.Location = loc -} - -func (w *With) setJSONOptions(opts astJSON.Options) { - w.jsonOptions = opts - if w.Location != nil { - w.Location.JSONOptions = opts - } -} - -func (w *With) MarshalJSON() ([]byte, error) { - data := map[string]interface{}{ - "target": w.Target, - "value": w.Value, - } - - if w.jsonOptions.MarshalOptions.IncludeLocation.With { - if w.Location != nil { - data["location"] = w.Location - } - } - - return json.Marshal(data) + return v1.NewBuiltinExpr(terms...) } // Copy returns a deep copy of the AST node x. If x is not an AST node, x is returned unmodified. func Copy(x interface{}) interface{} { - switch x := x.(type) { - case *Module: - return x.Copy() - case *Package: - return x.Copy() - case *Import: - return x.Copy() - case *Rule: - return x.Copy() - case *Head: - return x.Copy() - case Args: - return x.Copy() - case Body: - return x.Copy() - case *Expr: - return x.Copy() - case *With: - return x.Copy() - case *SomeDecl: - return x.Copy() - case *Every: - return x.Copy() - case *Term: - return x.Copy() - case *ArrayComprehension: - return x.Copy() - case *SetComprehension: - return x.Copy() - case *ObjectComprehension: - return x.Copy() - case Set: - return x.Copy() - case *object: - return x.Copy() - case *Array: - return x.Copy() - case Ref: - return x.Copy() - case Call: - return x.Copy() - case *Comment: - return x.Copy() - } - return x + return v1.Copy(x) } // RuleSet represents a collection of rules that produce a virtual document. -type RuleSet []*Rule +type RuleSet = v1.RuleSet // NewRuleSet returns a new RuleSet containing the given rules. 
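
// ---- editorial sketch (added in review; not part of the vendored patch) ----
// With modifiers attach `with target as value` overrides to an expression;
// IncludeWith (deleted in an earlier hunk, still present on the v1 type)
// returns a copy carrying the extra modifier. MustParseExpr and MustParseTerm
// are parser helpers from this package not shown here:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	expr := ast.MustParseExpr(`allowed(input.user)`)
	expr = expr.IncludeWith(ast.MustParseTerm("input.user"), ast.StringTerm("alice"))
	fmt.Println(expr) // allowed(input.user) with input.user as "alice"
}

// ---- end sketch ----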
 func NewRuleSet(rules ...*Rule) RuleSet {
-	rs := make(RuleSet, 0, len(rules))
-	for _, rule := range rules {
-		rs.Add(rule)
-	}
-	return rs
-}
-
-// Add inserts the rule into rs.
-func (rs *RuleSet) Add(rule *Rule) {
-	for _, exist := range *rs {
-		if exist.Equal(rule) {
-			return
-		}
-	}
-	*rs = append(*rs, rule)
-}
-
-// Contains returns true if rs contains rule.
-func (rs RuleSet) Contains(rule *Rule) bool {
-	for i := range rs {
-		if rs[i].Equal(rule) {
-			return true
-		}
-	}
-	return false
-}
-
-// Diff returns a new RuleSet containing rules in rs that are not in other.
-func (rs RuleSet) Diff(other RuleSet) RuleSet {
-	result := NewRuleSet()
-	for i := range rs {
-		if !other.Contains(rs[i]) {
-			result.Add(rs[i])
-		}
-	}
-	return result
-}
-
-// Equal returns true if rs equals other.
-func (rs RuleSet) Equal(other RuleSet) bool {
-	return len(rs.Diff(other)) == 0 && len(other.Diff(rs)) == 0
-}
-
-// Merge returns a ruleset containing the union of rules from rs and other.
-func (rs RuleSet) Merge(other RuleSet) RuleSet {
-	result := NewRuleSet()
-	for i := range rs {
-		result.Add(rs[i])
-	}
-	for i := range other {
-		result.Add(other[i])
-	}
-	return result
-}
-
-func (rs RuleSet) String() string {
-	buf := make([]string, 0, len(rs))
-	for _, rule := range rs {
-		buf = append(buf, rule.String())
-	}
-	return "{" + strings.Join(buf, ", ") + "}"
-}
-
-// Returns true if the equality or assignment expression referred to by expr
-// has a valid number of arguments.
-func validEqAssignArgCount(expr *Expr) bool {
-	return len(expr.Operands()) == 2
-}
-
-// this function checks if the expr refers to a non-namespaced (global) built-in
-// function like eq, gt, plus, etc.
-func isGlobalBuiltin(expr *Expr, name Var) bool {
-	terms, ok := expr.Terms.([]*Term)
-	if !ok {
-		return false
-	}
-
-	// NOTE(tsandall): do not use Term#Equal or Value#Compare to avoid
-	// allocation here.
-	ref, ok := terms[0].Value.(Ref)
-	if !ok || len(ref) != 1 {
-		return false
-	}
-	if head, ok := ref[0].Value.(Var); ok {
-		return head.Equal(name)
-	}
-	return false
+	return v1.NewRuleSet(rules...)
 }
diff --git a/vendor/github.com/open-policy-agent/opa/ast/pretty.go b/vendor/github.com/open-policy-agent/opa/ast/pretty.go
index b4f05ad5015..f2b8104e0a7 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/pretty.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/pretty.go
@@ -5,78 +5,14 @@ package ast
 
 import (
-	"fmt"
 	"io"
-	"strings"
+
+	v1 "github.com/open-policy-agent/opa/v1/ast"
 )
 
 // Pretty writes a pretty representation of the AST rooted at x to w.
 //
 // This function is intended for debug purposes when inspecting ASTs.
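
// ---- editorial sketch (added in review; not part of the vendored patch) ----
// Pretty, kept below as a one-line delegate to v1, writes an indented tree of
// the AST, which is useful when debugging how a policy parses. A minimal use:

package main

import (
	"os"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	ast.Pretty(os.Stdout, ast.MustParseBody(`x = 1`))
}

// ---- end sketch ----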
func Pretty(w io.Writer, x interface{}) { - pp := &prettyPrinter{ - depth: -1, - w: w, - } - NewBeforeAfterVisitor(pp.Before, pp.After).Walk(x) -} - -type prettyPrinter struct { - depth int - w io.Writer -} - -func (pp *prettyPrinter) Before(x interface{}) bool { - switch x.(type) { - case *Term: - default: - pp.depth++ - } - - switch x := x.(type) { - case *Term: - return false - case Args: - if len(x) == 0 { - return false - } - pp.writeType(x) - case *Expr: - extras := []string{} - if x.Negated { - extras = append(extras, "negated") - } - extras = append(extras, fmt.Sprintf("index=%d", x.Index)) - pp.writeIndent("%v %v", TypeName(x), strings.Join(extras, " ")) - case Null, Boolean, Number, String, Var: - pp.writeValue(x) - default: - pp.writeType(x) - } - return false -} - -func (pp *prettyPrinter) After(x interface{}) { - switch x.(type) { - case *Term: - default: - pp.depth-- - } -} - -func (pp *prettyPrinter) writeValue(x interface{}) { - pp.writeIndent(fmt.Sprint(x)) -} - -func (pp *prettyPrinter) writeType(x interface{}) { - pp.writeIndent(TypeName(x)) -} - -func (pp *prettyPrinter) writeIndent(f string, a ...interface{}) { - pad := strings.Repeat(" ", pp.depth) - pp.write(pad+f, a...) -} - -func (pp *prettyPrinter) write(f string, a ...interface{}) { - fmt.Fprintf(pp.w, f+"\n", a...) + v1.Pretty(w, x) } diff --git a/vendor/github.com/open-policy-agent/opa/ast/schema.go b/vendor/github.com/open-policy-agent/opa/ast/schema.go index 8c96ac624ea..979958a3c00 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/schema.go +++ b/vendor/github.com/open-policy-agent/opa/ast/schema.go @@ -5,59 +5,13 @@ package ast import ( - "fmt" - - "github.com/open-policy-agent/opa/types" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // SchemaSet holds a map from a path to a schema. -type SchemaSet struct { - m *util.HashMap -} +type SchemaSet = v1.SchemaSet // NewSchemaSet returns an empty SchemaSet. func NewSchemaSet() *SchemaSet { - - eqFunc := func(a, b util.T) bool { - return a.(Ref).Equal(b.(Ref)) - } - - hashFunc := func(x util.T) int { return x.(Ref).Hash() } - - return &SchemaSet{ - m: util.NewHashMap(eqFunc, hashFunc), - } -} - -// Put inserts a raw schema into the set. -func (ss *SchemaSet) Put(path Ref, raw interface{}) { - ss.m.Put(path, raw) -} - -// Get returns the raw schema identified by the path. -func (ss *SchemaSet) Get(path Ref) interface{} { - if ss == nil { - return nil - } - x, ok := ss.m.Get(path) - if !ok { - return nil - } - return x -} - -func loadSchema(raw interface{}, allowNet []string) (types.Type, error) { - - jsonSchema, err := compileSchema(raw, allowNet) - if err != nil { - return nil, err - } - - tpe, err := newSchemaParser().parseSchema(jsonSchema.RootSchema) - if err != nil { - return nil, fmt.Errorf("type checking: %w", err) - } - - return tpe, nil + return v1.NewSchemaSet() } diff --git a/vendor/github.com/open-policy-agent/opa/ast/strings.go b/vendor/github.com/open-policy-agent/opa/ast/strings.go index e489f6977c9..ef9354bf78b 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/strings.go +++ b/vendor/github.com/open-policy-agent/opa/ast/strings.go @@ -5,14 +5,10 @@ package ast import ( - "reflect" - "strings" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // TypeName returns a human readable name for the AST element type. 
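
// ---- editorial sketch (added in review; not part of the vendored patch) ----
// The SchemaSet shim above maps refs to raw schema documents via Put/Get on
// the aliased v1 type. The ref and schema below are hypothetical:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	ss := ast.NewSchemaSet()
	ss.Put(ast.MustParseRef("schema.input"), map[string]interface{}{"type": "object"})
	fmt.Println(ss.Get(ast.MustParseRef("schema.input"))) // map[type:object]
}

// ---- end sketch ----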
func TypeName(x interface{}) string { - if _, ok := x.(*lazyObj); ok { - return "object" - } - return strings.ToLower(reflect.Indirect(reflect.ValueOf(x)).Type().Name()) + return v1.TypeName(x) } diff --git a/vendor/github.com/open-policy-agent/opa/ast/term.go b/vendor/github.com/open-policy-agent/opa/ast/term.go index ce8ee4853d3..a5d146ea27c 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/term.go +++ b/vendor/github.com/open-policy-agent/opa/ast/term.go @@ -1,40 +1,22 @@ -// Copyright 2016 The OPA Authors. All rights reserved. +// Copyright 2024 The OPA Authors. All rights reserved. // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. -// nolint: deadcode // Public API. package ast import ( - "bytes" "encoding/json" - "errors" - "fmt" "io" - "math" - "math/big" - "net/url" - "regexp" - "sort" - "strconv" - "strings" - "sync" - - "github.com/OneOfOne/xxhash" - - astJSON "github.com/open-policy-agent/opa/ast/json" - "github.com/open-policy-agent/opa/ast/location" - "github.com/open-policy-agent/opa/util" -) -var errFindNotFound = fmt.Errorf("find: not found") + v1 "github.com/open-policy-agent/opa/v1/ast" +) // Location records a position in source code. -type Location = location.Location +type Location = v1.Location // NewLocation returns a new Location object. func NewLocation(text []byte, file string, row int, col int) *Location { - return location.NewLocation(text, file, row, col) + return v1.NewLocation(text, file, row, col) } // Value declares the common interface for all Term values. Every kind of Term value @@ -45,226 +27,58 @@ func NewLocation(text []byte, file string, row int, col int) *Location { // - Variables, References // - Array, Set, and Object Comprehensions // - Calls -type Value interface { - Compare(other Value) int // Compare returns <0, 0, or >0 if this Value is less than, equal to, or greater than other, respectively. - Find(path Ref) (Value, error) // Find returns value referred to by path or an error if path is not found. - Hash() int // Returns hash code of the value. - IsGround() bool // IsGround returns true if this value is not a variable or contains no variables. - String() string // String returns a human readable string representation of the value. -} +type Value = v1.Value // InterfaceToValue converts a native Go value x to a Value. 
func InterfaceToValue(x interface{}) (Value, error) { - switch x := x.(type) { - case nil: - return Null{}, nil - case bool: - return Boolean(x), nil - case json.Number: - return Number(x), nil - case int64: - return int64Number(x), nil - case uint64: - return uint64Number(x), nil - case float64: - return floatNumber(x), nil - case int: - return intNumber(x), nil - case string: - return String(x), nil - case []interface{}: - r := make([]*Term, len(x)) - for i, e := range x { - e, err := InterfaceToValue(e) - if err != nil { - return nil, err - } - r[i] = &Term{Value: e} - } - return NewArray(r...), nil - case map[string]interface{}: - r := newobject(len(x)) - for k, v := range x { - k, err := InterfaceToValue(k) - if err != nil { - return nil, err - } - v, err := InterfaceToValue(v) - if err != nil { - return nil, err - } - r.Insert(NewTerm(k), NewTerm(v)) - } - return r, nil - case map[string]string: - r := newobject(len(x)) - for k, v := range x { - k, err := InterfaceToValue(k) - if err != nil { - return nil, err - } - v, err := InterfaceToValue(v) - if err != nil { - return nil, err - } - r.Insert(NewTerm(k), NewTerm(v)) - } - return r, nil - default: - ptr := util.Reference(x) - if err := util.RoundTrip(ptr); err != nil { - return nil, fmt.Errorf("ast: interface conversion: %w", err) - } - return InterfaceToValue(*ptr) - } + return v1.InterfaceToValue(x) } // ValueFromReader returns an AST value from a JSON serialized value in the reader. func ValueFromReader(r io.Reader) (Value, error) { - var x interface{} - if err := util.NewJSONDecoder(r).Decode(&x); err != nil { - return nil, err - } - return InterfaceToValue(x) + return v1.ValueFromReader(r) } // As converts v into a Go native type referred to by x. func As(v Value, x interface{}) error { - return util.NewJSONDecoder(bytes.NewBufferString(v.String())).Decode(x) + return v1.As(v, x) } // Resolver defines the interface for resolving references to native Go values. -type Resolver interface { - Resolve(Ref) (interface{}, error) -} +type Resolver = v1.Resolver // ValueResolver defines the interface for resolving references to AST values. -type ValueResolver interface { - Resolve(Ref) (Value, error) -} +type ValueResolver = v1.ValueResolver // UnknownValueErr indicates a ValueResolver was unable to resolve a reference // because the reference refers to an unknown value. -type UnknownValueErr struct{} - -func (UnknownValueErr) Error() string { - return "unknown value" -} +type UnknownValueErr = v1.UnknownValueErr // IsUnknownValueErr returns true if the err is an UnknownValueErr. func IsUnknownValueErr(err error) bool { - _, ok := err.(UnknownValueErr) - return ok -} - -type illegalResolver struct{} - -func (illegalResolver) Resolve(ref Ref) (interface{}, error) { - return nil, fmt.Errorf("illegal value: %v", ref) + return v1.IsUnknownValueErr(err) } // ValueToInterface returns the Go representation of an AST value. The AST // value should not contain any values that require evaluation (e.g., vars, // comprehensions, etc.) 
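
// ---- editorial sketch (added in review; not part of the vendored patch) ----
// InterfaceToValue and As, both now one-line delegates above, round-trip
// between native Go values and AST values (As decodes via the value's JSON
// form). The data is illustrative:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	v, err := ast.InterfaceToValue(map[string]interface{}{"users": []interface{}{"alice", "bob"}})
	if err != nil {
		panic(err)
	}
	var out map[string]interface{}
	if err := ast.As(v, &out); err != nil {
		panic(err)
	}
	fmt.Println(out["users"]) // [alice bob]
}

// ---- end sketch ----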
func ValueToInterface(v Value, resolver Resolver) (interface{}, error) { - return valueToInterface(v, resolver, JSONOpt{}) -} - -func valueToInterface(v Value, resolver Resolver, opt JSONOpt) (interface{}, error) { - switch v := v.(type) { - case Null: - return nil, nil - case Boolean: - return bool(v), nil - case Number: - return json.Number(v), nil - case String: - return string(v), nil - case *Array: - buf := []interface{}{} - for i := 0; i < v.Len(); i++ { - x1, err := valueToInterface(v.Elem(i).Value, resolver, opt) - if err != nil { - return nil, err - } - buf = append(buf, x1) - } - return buf, nil - case *object: - buf := make(map[string]interface{}, v.Len()) - err := v.Iter(func(k, v *Term) error { - ki, err := valueToInterface(k.Value, resolver, opt) - if err != nil { - return err - } - var str string - var ok bool - if str, ok = ki.(string); !ok { - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(ki); err != nil { - return err - } - str = strings.TrimSpace(buf.String()) - } - vi, err := valueToInterface(v.Value, resolver, opt) - if err != nil { - return err - } - buf[str] = vi - return nil - }) - if err != nil { - return nil, err - } - return buf, nil - case *lazyObj: - if opt.CopyMaps { - return valueToInterface(v.force(), resolver, opt) - } - return v.native, nil - case Set: - buf := []interface{}{} - iter := func(x *Term) error { - x1, err := valueToInterface(x.Value, resolver, opt) - if err != nil { - return err - } - buf = append(buf, x1) - return nil - } - var err error - if opt.SortSets { - err = v.Sorted().Iter(iter) - } else { - err = v.Iter(iter) - } - if err != nil { - return nil, err - } - return buf, nil - case Ref: - return resolver.Resolve(v) - default: - return nil, fmt.Errorf("%v requires evaluation", TypeName(v)) - } + return v1.ValueToInterface(v, resolver) } // JSON returns the JSON representation of v. The value must not contain any // refs or terms that require evaluation (e.g., vars, comprehensions, etc.) func JSON(v Value) (interface{}, error) { - return JSONWithOpt(v, JSONOpt{}) + return v1.JSON(v) } // JSONOpt defines parameters for AST to JSON conversion. -type JSONOpt struct { - SortSets bool // sort sets before serializing (this makes conversion more expensive) - CopyMaps bool // enforces copying of map[string]interface{} read from the store -} +type JSONOpt = v1.JSONOpt // JSONWithOpt returns the JSON representation of v. The value must not contain any // refs or terms that require evaluation (e.g., vars, comprehensions, etc.) func JSONWithOpt(v Value, opt JSONOpt) (interface{}, error) { - return valueToInterface(v, illegalResolver{}, opt) + return v1.JSONWithOpt(v, opt) } // MustJSON returns the JSON representation of v. The value must not contain any @@ -272,3003 +86,221 @@ func JSONWithOpt(v Value, opt JSONOpt) (interface{}, error) { // the conversion fails, this function will panic. This function is mostly for // test purposes. func MustJSON(v Value) interface{} { - r, err := JSON(v) - if err != nil { - panic(err) - } - return r + return v1.MustJSON(v) } // MustInterfaceToValue converts a native Go value x to a Value. If the // conversion fails, this function will panic. This function is mostly for test // purposes. func MustInterfaceToValue(x interface{}) Value { - v, err := InterfaceToValue(x) - if err != nil { - panic(err) - } - return v + return v1.MustInterfaceToValue(x) } // Term is an argument to a function. 
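
// ---- editorial sketch (added in review; not part of the vendored patch) ----
// JSONWithOpt converts an AST value back to plain Go; JSONOpt.SortSets trades
// speed for deterministic set ordering, per the field comments above. NewSet
// is a set constructor from this package not shown in this hunk:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	set := ast.NewSet(ast.IntNumberTerm(3), ast.IntNumberTerm(1), ast.IntNumberTerm(2))
	sorted, err := ast.JSONWithOpt(set, ast.JSONOpt{SortSets: true})
	if err != nil {
		panic(err)
	}
	fmt.Println(sorted) // [1 2 3]
}

// ---- end sketch ----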
-type Term struct { - Value Value `json:"value"` // the value of the Term as represented in Go - Location *Location `json:"location,omitempty"` // the location of the Term in the source - - jsonOptions astJSON.Options -} +type Term = v1.Term // NewTerm returns a new Term object. func NewTerm(v Value) *Term { - return &Term{ - Value: v, - } -} - -// SetLocation updates the term's Location and returns the term itself. -func (term *Term) SetLocation(loc *Location) *Term { - term.Location = loc - return term -} - -// Loc returns the Location of term. -func (term *Term) Loc() *Location { - if term == nil { - return nil - } - return term.Location -} - -// SetLoc sets the location on term. -func (term *Term) SetLoc(loc *Location) { - term.SetLocation(loc) -} - -// Copy returns a deep copy of term. -func (term *Term) Copy() *Term { - - if term == nil { - return nil - } - - cpy := *term - - switch v := term.Value.(type) { - case Null, Boolean, Number, String, Var: - cpy.Value = v - case Ref: - cpy.Value = v.Copy() - case *Array: - cpy.Value = v.Copy() - case Set: - cpy.Value = v.Copy() - case *object: - cpy.Value = v.Copy() - case *ArrayComprehension: - cpy.Value = v.Copy() - case *ObjectComprehension: - cpy.Value = v.Copy() - case *SetComprehension: - cpy.Value = v.Copy() - case Call: - cpy.Value = v.Copy() - } - - return &cpy -} - -// Equal returns true if this term equals the other term. Equality is -// defined for each kind of term. -func (term *Term) Equal(other *Term) bool { - if term == nil && other != nil { - return false - } - if term != nil && other == nil { - return false - } - if term == other { - return true - } - - // TODO(tsandall): This early-exit avoids allocations for types that have - // Equal() functions that just use == underneath. We should revisit the - // other types and implement Equal() functions that do not require - // allocations. - switch v := term.Value.(type) { - case Null: - return v.Equal(other.Value) - case Boolean: - return v.Equal(other.Value) - case Number: - return v.Equal(other.Value) - case String: - return v.Equal(other.Value) - case Var: - return v.Equal(other.Value) - } - - return term.Value.Compare(other.Value) == 0 -} - -// Get returns a value referred to by name from the term. -func (term *Term) Get(name *Term) *Term { - switch v := term.Value.(type) { - case *object: - return v.Get(name) - case *Array: - return v.Get(name) - case interface { - Get(*Term) *Term - }: - return v.Get(name) - case Set: - if v.Contains(name) { - return name - } - } - return nil -} - -// Hash returns the hash code of the Term's Value. Its Location -// is ignored. -func (term *Term) Hash() int { - return term.Value.Hash() -} - -// IsGround returns true if this term's Value is ground. -func (term *Term) IsGround() bool { - return term.Value.IsGround() -} - -func (term *Term) setJSONOptions(opts astJSON.Options) { - term.jsonOptions = opts - if term.Location != nil { - term.Location.JSONOptions = opts - } -} - -// MarshalJSON returns the JSON encoding of the term. -// -// Specialized marshalling logic is required to include a type hint for Value. 
-func (term *Term) MarshalJSON() ([]byte, error) { - d := map[string]interface{}{ - "type": TypeName(term.Value), - "value": term.Value, - } - if term.jsonOptions.MarshalOptions.IncludeLocation.Term { - if term.Location != nil { - d["location"] = term.Location - } - } - return json.Marshal(d) -} - -func (term *Term) String() string { - return term.Value.String() -} - -// UnmarshalJSON parses the byte array and stores the result in term. -// Specialized unmarshalling is required to handle Value and Location. -func (term *Term) UnmarshalJSON(bs []byte) error { - v := map[string]interface{}{} - if err := util.UnmarshalJSON(bs, &v); err != nil { - return err - } - val, err := unmarshalValue(v) - if err != nil { - return err - } - term.Value = val - - if loc, ok := v["location"].(map[string]interface{}); ok { - term.Location = &Location{} - err := unmarshalLocation(term.Location, loc) - if err != nil { - return err - } - } - return nil -} - -// Vars returns a VarSet with variables contained in this term. -func (term *Term) Vars() VarSet { - vis := &VarVisitor{vars: VarSet{}} - vis.Walk(term) - return vis.vars + return v1.NewTerm(v) } // IsConstant returns true if the AST value is constant. func IsConstant(v Value) bool { - found := false - vis := GenericVisitor{ - func(x interface{}) bool { - switch x.(type) { - case Var, Ref, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Call: - found = true - return true - } - return false - }, - } - vis.Walk(v) - return !found + return v1.IsConstant(v) } // IsComprehension returns true if the supplied value is a comprehension. func IsComprehension(x Value) bool { - switch x.(type) { - case *ArrayComprehension, *ObjectComprehension, *SetComprehension: - return true - } - return false + return v1.IsComprehension(x) } // ContainsRefs returns true if the Value v contains refs. func ContainsRefs(v interface{}) bool { - found := false - WalkRefs(v, func(Ref) bool { - found = true - return found - }) - return found + return v1.ContainsRefs(v) } // ContainsComprehensions returns true if the Value v contains comprehensions. func ContainsComprehensions(v interface{}) bool { - found := false - WalkClosures(v, func(x interface{}) bool { - switch x.(type) { - case *ArrayComprehension, *ObjectComprehension, *SetComprehension: - found = true - return found - } - return found - }) - return found + return v1.ContainsComprehensions(v) } // ContainsClosures returns true if the Value v contains closures. func ContainsClosures(v interface{}) bool { - found := false - WalkClosures(v, func(x interface{}) bool { - switch x.(type) { - case *ArrayComprehension, *ObjectComprehension, *SetComprehension, *Every: - found = true - return found - } - return found - }) - return found + return v1.ContainsClosures(v) } // IsScalar returns true if the AST value is a scalar. func IsScalar(v Value) bool { - switch v.(type) { - case String: - return true - case Number: - return true - case Boolean: - return true - case Null: - return true - } - return false + return v1.IsScalar(v) } // Null represents the null value defined by JSON. -type Null struct{} +type Null = v1.Null // NullTerm creates a new Term with a Null value. func NullTerm() *Term { - return &Term{Value: Null{}} -} - -// Equal returns true if the other term Value is also Null. -func (null Null) Equal(other Value) bool { - switch other.(type) { - case Null: - return true - default: - return false - } -} - -// Compare compares null to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. 
-func (null Null) Compare(other Value) int { - return Compare(null, other) -} - -// Find returns the current value or a not found error. -func (null Null) Find(path Ref) (Value, error) { - if len(path) == 0 { - return null, nil - } - return nil, errFindNotFound -} - -// Hash returns the hash code for the Value. -func (null Null) Hash() int { - return 0 -} - -// IsGround always returns true. -func (Null) IsGround() bool { - return true -} - -func (null Null) String() string { - return "null" + return v1.NullTerm() } // Boolean represents a boolean value defined by JSON. -type Boolean bool +type Boolean = v1.Boolean // BooleanTerm creates a new Term with a Boolean value. func BooleanTerm(b bool) *Term { - return &Term{Value: Boolean(b)} -} - -// Equal returns true if the other Value is a Boolean and is equal. -func (bol Boolean) Equal(other Value) bool { - switch other := other.(type) { - case Boolean: - return bol == other - default: - return false - } -} - -// Compare compares bol to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (bol Boolean) Compare(other Value) int { - return Compare(bol, other) -} - -// Find returns the current value or a not found error. -func (bol Boolean) Find(path Ref) (Value, error) { - if len(path) == 0 { - return bol, nil - } - return nil, errFindNotFound -} - -// Hash returns the hash code for the Value. -func (bol Boolean) Hash() int { - if bol { - return 1 - } - return 0 -} - -// IsGround always returns true. -func (Boolean) IsGround() bool { - return true -} - -func (bol Boolean) String() string { - return strconv.FormatBool(bool(bol)) + return v1.BooleanTerm(b) } // Number represents a numeric value as defined by JSON. -type Number json.Number +type Number = v1.Number // NumberTerm creates a new Term with a Number value. func NumberTerm(n json.Number) *Term { - return &Term{Value: Number(n)} + return v1.NumberTerm(n) } // IntNumberTerm creates a new Term with an integer Number value. func IntNumberTerm(i int) *Term { - return &Term{Value: Number(strconv.Itoa(i))} + return v1.IntNumberTerm(i) } // UIntNumberTerm creates a new Term with an unsigned integer Number value. func UIntNumberTerm(u uint64) *Term { - return &Term{Value: uint64Number(u)} + return v1.UIntNumberTerm(u) } // FloatNumberTerm creates a new Term with a floating point Number value. func FloatNumberTerm(f float64) *Term { - s := strconv.FormatFloat(f, 'g', -1, 64) - return &Term{Value: Number(s)} -} - -// Equal returns true if the other Value is a Number and is equal. -func (num Number) Equal(other Value) bool { - switch other := other.(type) { - case Number: - return Compare(num, other) == 0 - default: - return false - } -} - -// Compare compares num to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (num Number) Compare(other Value) int { - return Compare(num, other) -} - -// Find returns the current value or a not found error. -func (num Number) Find(path Ref) (Value, error) { - if len(path) == 0 { - return num, nil - } - return nil, errFindNotFound -} - -// Hash returns the hash code for the Value. -func (num Number) Hash() int { - f, err := json.Number(num).Float64() - if err != nil { - bs := []byte(num) - h := xxhash.Checksum64(bs) - return int(h) - } - return int(f) -} - -// Int returns the int representation of num if possible. -func (num Number) Int() (int, bool) { - i64, ok := num.Int64() - return int(i64), ok -} - -// Int64 returns the int64 representation of num if possible. 
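The four numeric constructors likewise delegate to v1, with `json.Number` remaining the underlying representation. A small usage sketch; the values are illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	a := ast.NumberTerm(json.Number("1.5")) // explicit json.Number
	b := ast.IntNumberTerm(-3)
	c := ast.UIntNumberTerm(42)
	d := ast.FloatNumberTerm(2.5) // v1 formats floats with strconv.FormatFloat(f, 'g', -1, 64)

	fmt.Println(a, b, c, d) // 1.5 -3 42 2.5
}
```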
-func (num Number) Int64() (int64, bool) { - i, err := json.Number(num).Int64() - if err != nil { - return 0, false - } - return i, true -} - -// Float64 returns the float64 representation of num if possible. -func (num Number) Float64() (float64, bool) { - f, err := json.Number(num).Float64() - if err != nil { - return 0, false - } - return f, true -} - -// IsGround always returns true. -func (Number) IsGround() bool { - return true -} - -// MarshalJSON returns JSON encoded bytes representing num. -func (num Number) MarshalJSON() ([]byte, error) { - return json.Marshal(json.Number(num)) -} - -func (num Number) String() string { - return string(num) -} - -func intNumber(i int) Number { - return Number(strconv.Itoa(i)) -} - -func int64Number(i int64) Number { - return Number(strconv.FormatInt(i, 10)) -} - -func uint64Number(u uint64) Number { - return Number(strconv.FormatUint(u, 10)) -} - -func floatNumber(f float64) Number { - return Number(strconv.FormatFloat(f, 'g', -1, 64)) + return v1.FloatNumberTerm(f) } // String represents a string value as defined by JSON. -type String string +type String = v1.String // StringTerm creates a new Term with a String value. func StringTerm(s string) *Term { - return &Term{Value: String(s)} -} - -// Equal returns true if the other Value is a String and is equal. -func (str String) Equal(other Value) bool { - switch other := other.(type) { - case String: - return str == other - default: - return false - } -} - -// Compare compares str to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (str String) Compare(other Value) int { - return Compare(str, other) -} - -// Find returns the current value or a not found error. -func (str String) Find(path Ref) (Value, error) { - if len(path) == 0 { - return str, nil - } - return nil, errFindNotFound -} - -// IsGround always returns true. -func (String) IsGround() bool { - return true -} - -func (str String) String() string { - return strconv.Quote(string(str)) -} - -// Hash returns the hash code for the Value. -func (str String) Hash() int { - h := xxhash.ChecksumString64S(string(str), hashSeed0) - return int(h) + return v1.StringTerm(s) } // Var represents a variable as defined by the language. -type Var string +type Var = v1.Var // VarTerm creates a new Term with a Variable value. func VarTerm(v string) *Term { - return &Term{Value: Var(v)} -} - -// Equal returns true if the other Value is a Variable and has the same value -// (name). -func (v Var) Equal(other Value) bool { - switch other := other.(type) { - case Var: - return v == other - default: - return false - } -} - -// Compare compares v to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (v Var) Compare(other Value) int { - return Compare(v, other) -} - -// Find returns the current value or a not found error. -func (v Var) Find(path Ref) (Value, error) { - if len(path) == 0 { - return v, nil - } - return nil, errFindNotFound -} - -// Hash returns the hash code for the Value. -func (v Var) Hash() int { - h := xxhash.ChecksumString64S(string(v), hashSeed0) - return int(h) -} - -// IsGround always returns false. -func (Var) IsGround() bool { - return false -} - -// IsWildcard returns true if this is a wildcard variable. -func (v Var) IsWildcard() bool { - return strings.HasPrefix(string(v), WildcardPrefix) -} - -// IsGenerated returns true if this variable was generated during compilation. 
-func (v Var) IsGenerated() bool { - return strings.HasPrefix(string(v), "__local") -} - -func (v Var) String() string { - // Special case for wildcard so that string representation is parseable. The - // parser mangles wildcard variables to make their names unique and uses an - // illegal variable name character (WildcardPrefix) to avoid conflicts. When - // we serialize the variable here, we need to make sure it's parseable. - if v.IsWildcard() { - return Wildcard.String() - } - return string(v) + return v1.VarTerm(v) } // Ref represents a reference as defined by the language. -type Ref []*Term +type Ref = v1.Ref // EmptyRef returns a new, empty reference. func EmptyRef() Ref { - return Ref([]*Term{}) + return v1.EmptyRef() } // PtrRef returns a new reference against the head for the pointer // s. Path components in the pointer are unescaped. func PtrRef(head *Term, s string) (Ref, error) { - s = strings.Trim(s, "/") - if s == "" { - return Ref{head}, nil - } - parts := strings.Split(s, "/") - if maxLen := math.MaxInt32; len(parts) >= maxLen { - return nil, fmt.Errorf("path too long: %s, %d > %d (max)", s, len(parts), maxLen) - } - ref := make(Ref, uint(len(parts))+1) - ref[0] = head - for i := 0; i < len(parts); i++ { - var err error - parts[i], err = url.PathUnescape(parts[i]) - if err != nil { - return nil, err - } - ref[i+1] = StringTerm(parts[i]) - } - return ref, nil + return v1.PtrRef(head, s) } // RefTerm creates a new Term with a Ref value. func RefTerm(r ...*Term) *Term { - return &Term{Value: Ref(r)} -} - -// Append returns a copy of ref with the term appended to the end. -func (ref Ref) Append(term *Term) Ref { - n := len(ref) - dst := make(Ref, n+1) - copy(dst, ref) - dst[n] = term - return dst -} - -// Insert returns a copy of the ref with x inserted at pos. If pos < len(ref), -// existing elements are shifted to the right. If pos > len(ref)+1 this -// function panics. -func (ref Ref) Insert(x *Term, pos int) Ref { - switch { - case pos == len(ref): - return ref.Append(x) - case pos > len(ref)+1: - panic("illegal index") - } - cpy := make(Ref, len(ref)+1) - copy(cpy, ref[:pos]) - cpy[pos] = x - copy(cpy[pos+1:], ref[pos:]) - return cpy -} - -// Extend returns a copy of ref with the terms from other appended. The head of -// other will be converted to a string. -func (ref Ref) Extend(other Ref) Ref { - dst := make(Ref, len(ref)+len(other)) - copy(dst, ref) - - head := other[0].Copy() - head.Value = String(head.Value.(Var)) - offset := len(ref) - dst[offset] = head - - copy(dst[offset+1:], other[1:]) - return dst -} - -// Concat returns a ref with the terms appended. -func (ref Ref) Concat(terms []*Term) Ref { - if len(terms) == 0 { - return ref - } - cpy := make(Ref, len(ref)+len(terms)) - copy(cpy, ref) - copy(cpy[len(ref):], terms) - return cpy -} - -// Dynamic returns the offset of the first non-constant operand of ref. -func (ref Ref) Dynamic() int { - switch ref[0].Value.(type) { - case Call: - return 0 - } - for i := 1; i < len(ref); i++ { - if !IsConstant(ref[i].Value) { - return i - } - } - return -1 -} - -// Copy returns a deep copy of ref. -func (ref Ref) Copy() Ref { - return termSliceCopy(ref) -} - -// Equal returns true if ref is equal to other. -func (ref Ref) Equal(other Value) bool { - return Compare(ref, other) == 0 -} - -// Compare compares ref to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. 
-func (ref Ref) Compare(other Value) int { - return Compare(ref, other) -} - -// Find returns the current value or a "not found" error. -func (ref Ref) Find(path Ref) (Value, error) { - if len(path) == 0 { - return ref, nil - } - return nil, errFindNotFound -} - -// Hash returns the hash code for the Value. -func (ref Ref) Hash() int { - return termSliceHash(ref) -} - -// HasPrefix returns true if the other ref is a prefix of this ref. -func (ref Ref) HasPrefix(other Ref) bool { - if len(other) > len(ref) { - return false - } - for i := range other { - if !ref[i].Equal(other[i]) { - return false - } - } - return true -} - -// ConstantPrefix returns the constant portion of the ref starting from the head. -func (ref Ref) ConstantPrefix() Ref { - ref = ref.Copy() - - i := ref.Dynamic() - if i < 0 { - return ref - } - return ref[:i] -} - -func (ref Ref) StringPrefix() Ref { - r := ref.Copy() - - for i := 1; i < len(ref); i++ { - switch r[i].Value.(type) { - case String: // pass - default: // cut off - return r[:i] - } - } - - return r -} - -// GroundPrefix returns the ground portion of the ref starting from the head. By -// definition, the head of the reference is always ground. -func (ref Ref) GroundPrefix() Ref { - prefix := make(Ref, 0, len(ref)) - - for i, x := range ref { - if i > 0 && !x.IsGround() { - break - } - prefix = append(prefix, x) - } - - return prefix -} - -func (ref Ref) DynamicSuffix() Ref { - i := ref.Dynamic() - if i < 0 { - return nil - } - return ref[i:] -} - -// IsGround returns true if all of the parts of the Ref are ground. -func (ref Ref) IsGround() bool { - if len(ref) == 0 { - return true - } - return termSliceIsGround(ref[1:]) -} - -// IsNested returns true if this ref contains other Refs. -func (ref Ref) IsNested() bool { - for _, x := range ref { - if _, ok := x.Value.(Ref); ok { - return true - } - } - return false -} - -// Ptr returns a slash-separated path string for this ref. If the ref -// contains non-string terms this function returns an error. Path -// components are escaped. -func (ref Ref) Ptr() (string, error) { - parts := make([]string, 0, len(ref)-1) - for _, term := range ref[1:] { - if str, ok := term.Value.(String); ok { - parts = append(parts, url.PathEscape(string(str))) - } else { - return "", fmt.Errorf("invalid path value type") - } - } - return strings.Join(parts, "/"), nil + return v1.RefTerm(r...) } -var varRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$") - func IsVarCompatibleString(s string) bool { - return varRegexp.MatchString(s) -} - -func (ref Ref) String() string { - if len(ref) == 0 { - return "" - } - buf := []string{ref[0].Value.String()} - path := ref[1:] - for _, p := range path { - switch p := p.Value.(type) { - case String: - str := string(p) - if varRegexp.MatchString(str) && len(buf) > 0 && !IsKeyword(str) { - buf = append(buf, "."+str) - } else { - buf = append(buf, "["+p.String()+"]") - } - default: - buf = append(buf, "["+p.String()+"]") - } - } - return strings.Join(buf, "") -} - -// OutputVars returns a VarSet containing variables that would be bound by evaluating -// this expression in isolation. 
-func (ref Ref) OutputVars() VarSet { - vis := NewVarVisitor().WithParams(VarVisitorParams{SkipRefHead: true}) - vis.Walk(ref) - return vis.Vars() -} - -func (ref Ref) toArray() *Array { - a := NewArray() - for _, term := range ref { - if _, ok := term.Value.(String); ok { - a = a.Append(term) - } else { - a = a.Append(StringTerm(term.Value.String())) - } - } - return a + return v1.IsVarCompatibleString(s) } // QueryIterator defines the interface for querying AST documents with references. -type QueryIterator func(map[Var]Value, Value) error +type QueryIterator = v1.QueryIterator // ArrayTerm creates a new Term with an Array value. func ArrayTerm(a ...*Term) *Term { - return NewTerm(NewArray(a...)) + return v1.ArrayTerm(a...) } // NewArray creates an Array with the terms provided. The array will // use the provided term slice. func NewArray(a ...*Term) *Array { - hs := make([]int, len(a)) - for i, e := range a { - hs[i] = e.Value.Hash() - } - arr := &Array{elems: a, hashs: hs, ground: termSliceIsGround(a)} - arr.rehash() - return arr + return v1.NewArray(a...) } // Array represents an array as defined by the language. Arrays are similar to the // same types as defined by JSON with the exception that they can contain Vars // and References. -type Array struct { - elems []*Term - hashs []int // element hashes - hash int - ground bool -} - -// Copy returns a deep copy of arr. -func (arr *Array) Copy() *Array { - cpy := make([]int, len(arr.elems)) - copy(cpy, arr.hashs) - return &Array{ - elems: termSliceCopy(arr.elems), - hashs: cpy, - hash: arr.hash, - ground: arr.IsGround()} -} - -// Equal returns true if arr is equal to other. -func (arr *Array) Equal(other Value) bool { - return Compare(arr, other) == 0 -} - -// Compare compares arr to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (arr *Array) Compare(other Value) int { - return Compare(arr, other) -} - -// Find returns the value at the index or an out-of-range error. -func (arr *Array) Find(path Ref) (Value, error) { - if len(path) == 0 { - return arr, nil - } - num, ok := path[0].Value.(Number) - if !ok { - return nil, errFindNotFound - } - i, ok := num.Int() - if !ok { - return nil, errFindNotFound - } - if i < 0 || i >= arr.Len() { - return nil, errFindNotFound - } - return arr.Elem(i).Value.Find(path[1:]) -} - -// Get returns the element at pos or nil if not possible. -func (arr *Array) Get(pos *Term) *Term { - num, ok := pos.Value.(Number) - if !ok { - return nil - } - - i, ok := num.Int() - if !ok { - return nil - } - - if i >= 0 && i < len(arr.elems) { - return arr.elems[i] - } - - return nil -} - -// Sorted returns a new Array that contains the sorted elements of arr. -func (arr *Array) Sorted() *Array { - cpy := make([]*Term, len(arr.elems)) - for i := range cpy { - cpy[i] = arr.elems[i] - } - sort.Sort(termSlice(cpy)) - a := NewArray(cpy...) - a.hashs = arr.hashs - return a -} - -// Hash returns the hash code for the Value. -func (arr *Array) Hash() int { - return arr.hash -} - -// IsGround returns true if all of the Array elements are ground. -func (arr *Array) IsGround() bool { - return arr.ground -} - -// MarshalJSON returns JSON encoded bytes representing arr. 
-func (arr *Array) MarshalJSON() ([]byte, error) { - if len(arr.elems) == 0 { - return []byte(`[]`), nil - } - return json.Marshal(arr.elems) -} - -func (arr *Array) String() string { - var b strings.Builder - b.WriteRune('[') - for i, e := range arr.elems { - if i > 0 { - b.WriteString(", ") - } - b.WriteString(e.String()) - } - b.WriteRune(']') - return b.String() -} - -// Len returns the number of elements in the array. -func (arr *Array) Len() int { - return len(arr.elems) -} - -// Elem returns the element i of arr. -func (arr *Array) Elem(i int) *Term { - return arr.elems[i] -} - -// Set sets the element i of arr. -func (arr *Array) Set(i int, v *Term) { - arr.set(i, v) -} - -// rehash updates the cached hash of arr. -func (arr *Array) rehash() { - arr.hash = 0 - for _, h := range arr.hashs { - arr.hash += h - } -} - -// set sets the element i of arr. -func (arr *Array) set(i int, v *Term) { - arr.ground = arr.ground && v.IsGround() - arr.elems[i] = v - arr.hashs[i] = v.Value.Hash() - arr.rehash() -} - -// Slice returns a slice of arr starting from i index to j. -1 -// indicates the end of the array. The returned value array is not a -// copy and any modifications to either of arrays may be reflected to -// the other. -func (arr *Array) Slice(i, j int) *Array { - var elems []*Term - var hashs []int - if j == -1 { - elems = arr.elems[i:] - hashs = arr.hashs[i:] - } else { - elems = arr.elems[i:j] - hashs = arr.hashs[i:j] - } - // If arr is ground, the slice is, too. - // If it's not, the slice could still be. - gr := arr.ground || termSliceIsGround(elems) - - s := &Array{elems: elems, hashs: hashs, ground: gr} - s.rehash() - return s -} - -// Iter calls f on each element in arr. If f returns an error, -// iteration stops and the return value is the error. -func (arr *Array) Iter(f func(*Term) error) error { - for i := range arr.elems { - if err := f(arr.elems[i]); err != nil { - return err - } - } - return nil -} - -// Until calls f on each element in arr. If f returns true, iteration stops. -func (arr *Array) Until(f func(*Term) bool) bool { - err := arr.Iter(func(t *Term) error { - if f(t) { - return errStop - } - return nil - }) - return err != nil -} - -// Foreach calls f on each element in arr. -func (arr *Array) Foreach(f func(*Term)) { - _ = arr.Iter(func(t *Term) error { - f(t) - return nil - }) // ignore error -} - -// Append appends a term to arr, returning the appended array. -func (arr *Array) Append(v *Term) *Array { - cpy := *arr - cpy.elems = append(arr.elems, v) - cpy.hashs = append(arr.hashs, v.Value.Hash()) - cpy.hash = arr.hash + v.Value.Hash() - cpy.ground = arr.ground && v.IsGround() - return &cpy -} +type Array = v1.Array // Set represents a set as defined by the language. -type Set interface { - Value - Len() int - Copy() Set - Diff(Set) Set - Intersect(Set) Set - Union(Set) Set - Add(*Term) - Iter(func(*Term) error) error - Until(func(*Term) bool) bool - Foreach(func(*Term)) - Contains(*Term) bool - Map(func(*Term) (*Term, error)) (Set, error) - Reduce(*Term, func(*Term, *Term) (*Term, error)) (*Term, error) - Sorted() *Array - Slice() []*Term -} +type Set = v1.Set // NewSet returns a new Set containing t. func NewSet(t ...*Term) Set { - s := newset(len(t)) - for i := range t { - s.Add(t[i]) - } - return s + return v1.NewSet(t...) 
} -func newset(n int) *set { - var keys []*Term - if n > 0 { - keys = make([]*Term, 0, n) - } - return &set{ - elems: make(map[int]*Term, n), - keys: keys, - hash: 0, - ground: true, - sortGuard: new(sync.Once), - } -} - -// SetTerm returns a new Term representing a set containing terms t. func SetTerm(t ...*Term) *Term { - set := NewSet(t...) - return &Term{ - Value: set, - } -} - -type set struct { - elems map[int]*Term - keys []*Term - hash int - ground bool - sortGuard *sync.Once // Prevents race condition around sorting. -} - -// Copy returns a deep copy of s. -func (s *set) Copy() Set { - cpy := newset(s.Len()) - s.Foreach(func(x *Term) { - cpy.Add(x.Copy()) - }) - cpy.hash = s.hash - cpy.ground = s.ground - return cpy -} - -// IsGround returns true if all terms in s are ground. -func (s *set) IsGround() bool { - return s.ground -} - -// Hash returns a hash code for s. -func (s *set) Hash() int { - return s.hash -} - -func (s *set) String() string { - if s.Len() == 0 { - return "set()" - } - var b strings.Builder - b.WriteRune('{') - for i := range s.sortedKeys() { - if i > 0 { - b.WriteString(", ") - } - b.WriteString(s.keys[i].Value.String()) - } - b.WriteRune('}') - return b.String() -} - -func (s *set) sortedKeys() []*Term { - s.sortGuard.Do(func() { - sort.Sort(termSlice(s.keys)) - }) - return s.keys -} - -// Compare compares s to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (s *set) Compare(other Value) int { - o1 := sortOrder(s) - o2 := sortOrder(other) - if o1 < o2 { - return -1 - } else if o1 > o2 { - return 1 - } - t := other.(*set) - return termSliceCompare(s.sortedKeys(), t.sortedKeys()) -} - -// Find returns the set or dereferences the element itself. -func (s *set) Find(path Ref) (Value, error) { - if len(path) == 0 { - return s, nil - } - if !s.Contains(path[0]) { - return nil, errFindNotFound - } - return path[0].Value.Find(path[1:]) -} - -// Diff returns elements in s that are not in other. -func (s *set) Diff(other Set) Set { - r := NewSet() - s.Foreach(func(x *Term) { - if !other.Contains(x) { - r.Add(x) - } - }) - return r -} - -// Intersect returns the set containing elements in both s and other. -func (s *set) Intersect(other Set) Set { - o := other.(*set) - n, m := s.Len(), o.Len() - ss := s - so := o - if m < n { - ss = o - so = s - n = m - } - - r := newset(n) - ss.Foreach(func(x *Term) { - if so.Contains(x) { - r.Add(x) - } - }) - return r -} - -// Union returns the set containing all elements of s and other. -func (s *set) Union(other Set) Set { - r := NewSet() - s.Foreach(func(x *Term) { - r.Add(x) - }) - other.Foreach(func(x *Term) { - r.Add(x) - }) - return r -} - -// Add updates s to include t. -func (s *set) Add(t *Term) { - s.insert(t) -} - -// Iter calls f on each element in s. If f returns an error, iteration stops -// and the return value is the error. -func (s *set) Iter(f func(*Term) error) error { - for i := range s.sortedKeys() { - if err := f(s.keys[i]); err != nil { - return err - } - } - return nil -} - -var errStop = errors.New("stop") - -// Until calls f on each element in s. If f returns true, iteration stops. -func (s *set) Until(f func(*Term) bool) bool { - err := s.Iter(func(t *Term) error { - if f(t) { - return errStop - } - return nil - }) - return err != nil -} - -// Foreach calls f on each element in s. 
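The `Set` implementation moves wholesale to v1 while keeping the same interface. A sketch of the algebraic operations, including the numeric-equality behavior that the inlined `big.Rat` comparison below implements (1.0 and 1 are the same set element):

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	a := ast.NewSet(ast.IntNumberTerm(1), ast.IntNumberTerm(2))
	b := ast.NewSet(ast.IntNumberTerm(2), ast.IntNumberTerm(3))

	fmt.Println(a.Union(b))     // {1, 2, 3}
	fmt.Println(a.Intersect(b)) // {2}
	fmt.Println(a.Diff(b))      // {1}

	// Membership compares numbers numerically, not textually.
	fmt.Println(a.Contains(ast.NumberTerm("1.0"))) // true
}
```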
-func (s *set) Foreach(f func(*Term)) { - _ = s.Iter(func(t *Term) error { - f(t) - return nil - }) // ignore error -} - -// Map returns a new Set obtained by applying f to each value in s. -func (s *set) Map(f func(*Term) (*Term, error)) (Set, error) { - set := NewSet() - err := s.Iter(func(x *Term) error { - term, err := f(x) - if err != nil { - return err - } - set.Add(term) - return nil - }) - if err != nil { - return nil, err - } - return set, nil -} - -// Reduce returns a Term produced by applying f to each value in s. The first -// argument to f is the reduced value (starting with i) and the second argument -// to f is the element in s. -func (s *set) Reduce(i *Term, f func(*Term, *Term) (*Term, error)) (*Term, error) { - err := s.Iter(func(x *Term) error { - var err error - i, err = f(i, x) - if err != nil { - return err - } - return nil - }) - return i, err -} - -// Contains returns true if t is in s. -func (s *set) Contains(t *Term) bool { - return s.get(t) != nil -} - -// Len returns the number of elements in the set. -func (s *set) Len() int { - return len(s.keys) -} - -// MarshalJSON returns JSON encoded bytes representing s. -func (s *set) MarshalJSON() ([]byte, error) { - if s.keys == nil { - return []byte(`[]`), nil - } - return json.Marshal(s.sortedKeys()) -} - -// Sorted returns an Array that contains the sorted elements of s. -func (s *set) Sorted() *Array { - cpy := make([]*Term, len(s.keys)) - copy(cpy, s.sortedKeys()) - return NewArray(cpy...) -} - -// Slice returns a slice of terms contained in the set. -func (s *set) Slice() []*Term { - return s.sortedKeys() -} - -// NOTE(philipc): We assume a many-readers, single-writer model here. -// This method should NOT be used concurrently, or else we risk data races. -func (s *set) insert(x *Term) { - hash := x.Hash() - insertHash := hash - // This `equal` utility is duplicated and manually inlined a number of - // time in this file. Inlining it avoids heap allocations, so it makes - // a big performance difference: some operations like lookup become twice - // as slow without it. - var equal func(v Value) bool - - switch x := x.Value.(type) { - case Null, Boolean, String, Var: - equal = func(y Value) bool { return x == y } - case Number: - if xi, err := json.Number(x).Int64(); err == nil { - equal = func(y Value) bool { - if y, ok := y.(Number); ok { - if yi, err := json.Number(y).Int64(); err == nil { - return xi == yi - } - } - - return false - } - break - } - - // We use big.Rat for comparing big numbers. - // It replaces big.Float due to following reason: - // big.Float comes with a default precision of 64, and setting a - // larger precision results in more memory being allocated - // (regardless of the actual number we are parsing with SetString). - // - // Note: If we're so close to zero that big.Float says we are zero, do - // *not* big.Rat).SetString on the original string it'll potentially - // take very long. 
- var a *big.Rat - fa, ok := new(big.Float).SetString(string(x)) - if !ok { - panic("illegal value") - } - if fa.IsInt() { - if i, _ := fa.Int64(); i == 0 { - a = new(big.Rat).SetInt64(0) - } - } - if a == nil { - a, ok = new(big.Rat).SetString(string(x)) - if !ok { - panic("illegal value") - } - } - - equal = func(b Value) bool { - if bNum, ok := b.(Number); ok { - var b *big.Rat - fb, ok := new(big.Float).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - if fb.IsInt() { - if i, _ := fb.Int64(); i == 0 { - b = new(big.Rat).SetInt64(0) - } - } - if b == nil { - b, ok = new(big.Rat).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - } - - return a.Cmp(b) == 0 - } - - return false - } - default: - equal = func(y Value) bool { return Compare(x, y) == 0 } - } - - for curr, ok := s.elems[insertHash]; ok; { - if equal(curr.Value) { - return - } - - insertHash++ - curr, ok = s.elems[insertHash] - } - - s.elems[insertHash] = x - // O(1) insertion, but we'll have to re-sort the keys later. - s.keys = append(s.keys, x) - // Reset the sync.Once instance. - // See https://github.com/golang/go/issues/25955 for why we do it this way. - s.sortGuard = new(sync.Once) - - s.hash += hash - s.ground = s.ground && x.IsGround() -} - -func (s *set) get(x *Term) *Term { - hash := x.Hash() - // This `equal` utility is duplicated and manually inlined a number of - // time in this file. Inlining it avoids heap allocations, so it makes - // a big performance difference: some operations like lookup become twice - // as slow without it. - var equal func(v Value) bool - - switch x := x.Value.(type) { - case Null, Boolean, String, Var: - equal = func(y Value) bool { return x == y } - case Number: - if xi, err := json.Number(x).Int64(); err == nil { - equal = func(y Value) bool { - if y, ok := y.(Number); ok { - if yi, err := json.Number(y).Int64(); err == nil { - return xi == yi - } - } - - return false - } - break - } - - // We use big.Rat for comparing big numbers. - // It replaces big.Float due to following reason: - // big.Float comes with a default precision of 64, and setting a - // larger precision results in more memory being allocated - // (regardless of the actual number we are parsing with SetString). - // - // Note: If we're so close to zero that big.Float says we are zero, do - // *not* big.Rat).SetString on the original string it'll potentially - // take very long. - var a *big.Rat - fa, ok := new(big.Float).SetString(string(x)) - if !ok { - panic("illegal value") - } - if fa.IsInt() { - if i, _ := fa.Int64(); i == 0 { - a = new(big.Rat).SetInt64(0) - } - } - if a == nil { - a, ok = new(big.Rat).SetString(string(x)) - if !ok { - panic("illegal value") - } - } - - equal = func(b Value) bool { - if bNum, ok := b.(Number); ok { - var b *big.Rat - fb, ok := new(big.Float).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - if fb.IsInt() { - if i, _ := fb.Int64(); i == 0 { - b = new(big.Rat).SetInt64(0) - } - } - if b == nil { - b, ok = new(big.Rat).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - } - - return a.Cmp(b) == 0 - } - return false - - } - - default: - equal = func(y Value) bool { return Compare(x, y) == 0 } - } - - for curr, ok := s.elems[hash]; ok; { - if equal(curr.Value) { - return curr - } - - hash++ - curr, ok = s.elems[hash] - } - return nil + return v1.SetTerm(t...) } // Object represents an object as defined by the language. 
-type Object interface { - Value - Len() int - Get(*Term) *Term - Copy() Object - Insert(*Term, *Term) - Iter(func(*Term, *Term) error) error - Until(func(*Term, *Term) bool) bool - Foreach(func(*Term, *Term)) - Map(func(*Term, *Term) (*Term, *Term, error)) (Object, error) - Diff(other Object) Object - Intersect(other Object) [][3]*Term - Merge(other Object) (Object, bool) - MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool) - Filter(filter Object) (Object, error) - Keys() []*Term - KeysIterator() ObjectKeysIterator - get(k *Term) *objectElem // To prevent external implementations -} +type Object = v1.Object // NewObject creates a new Object with t. func NewObject(t ...[2]*Term) Object { - obj := newobject(len(t)) - for i := range t { - obj.Insert(t[i][0], t[i][1]) - } - return obj + return v1.NewObject(t...) } // ObjectTerm creates a new Term with an Object value. func ObjectTerm(o ...[2]*Term) *Term { - return &Term{Value: NewObject(o...)} + return v1.ObjectTerm(o...) } func LazyObject(blob map[string]interface{}) Object { - return &lazyObj{native: blob, cache: map[string]Value{}} -} - -type lazyObj struct { - strict Object - cache map[string]Value - native map[string]interface{} -} - -func (l *lazyObj) force() Object { - if l.strict == nil { - l.strict = MustInterfaceToValue(l.native).(Object) - // NOTE(jf): a possible performance improvement here would be to check how many - // entries have been realized to AST in the cache, and if some threshold compared to the - // total number of keys is exceeded, realize the remaining entries and set l.strict to l.cache. - l.cache = map[string]Value{} // We don't need the cache anymore; drop it to free up memory. - } - return l.strict -} - -func (l *lazyObj) Compare(other Value) int { - o1 := sortOrder(l) - o2 := sortOrder(other) - if o1 < o2 { - return -1 - } else if o2 < o1 { - return 1 - } - return l.force().Compare(other) -} - -func (l *lazyObj) Copy() Object { - return l -} - -func (l *lazyObj) Diff(other Object) Object { - return l.force().Diff(other) -} - -func (l *lazyObj) Intersect(other Object) [][3]*Term { - return l.force().Intersect(other) -} - -func (l *lazyObj) Iter(f func(*Term, *Term) error) error { - return l.force().Iter(f) -} - -func (l *lazyObj) Until(f func(*Term, *Term) bool) bool { - // NOTE(sr): there could be benefits in not forcing here -- if we abort because - // `f` returns true, we could save us from converting the rest of the object. - return l.force().Until(f) -} - -func (l *lazyObj) Foreach(f func(*Term, *Term)) { - l.force().Foreach(f) -} - -func (l *lazyObj) Filter(filter Object) (Object, error) { - return l.force().Filter(filter) -} - -func (l *lazyObj) Map(f func(*Term, *Term) (*Term, *Term, error)) (Object, error) { - return l.force().Map(f) -} - -func (l *lazyObj) MarshalJSON() ([]byte, error) { - return l.force().(*object).MarshalJSON() -} - -func (l *lazyObj) Merge(other Object) (Object, bool) { - return l.force().Merge(other) -} - -func (l *lazyObj) MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool) { - return l.force().MergeWith(other, conflictResolver) -} - -func (l *lazyObj) Len() int { - return len(l.native) -} - -func (l *lazyObj) String() string { - return l.force().String() + return v1.LazyObject(blob) } -// get is merely there to implement the Object interface -- `get` there serves the -// purpose of prohibiting external implementations. It's never called for lazyObj. 
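`LazyObject` still wraps a native `map[string]interface{}` and converts entries to AST values only on access, as the deleted `force`/cache machinery above shows. A usage sketch with an illustrative map:

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	// Entries are converted lazily and cached on first access.
	obj := ast.LazyObject(map[string]interface{}{"role": "admin", "age": 42})

	fmt.Println(obj.Len())                       // 2
	fmt.Println(obj.Get(ast.StringTerm("role"))) // "admin"
}
```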
-func (*lazyObj) get(*Term) *objectElem { - return nil -} - -func (l *lazyObj) Get(k *Term) *Term { - if l.strict != nil { - return l.strict.Get(k) - } - if s, ok := k.Value.(String); ok { - if v, ok := l.cache[string(s)]; ok { - return NewTerm(v) - } - - if val, ok := l.native[string(s)]; ok { - var converted Value - switch val := val.(type) { - case map[string]interface{}: - converted = LazyObject(val) - default: - converted = MustInterfaceToValue(val) - } - l.cache[string(s)] = converted - return NewTerm(converted) - } - } - return nil -} - -func (l *lazyObj) Insert(k, v *Term) { - l.force().Insert(k, v) -} - -func (*lazyObj) IsGround() bool { - return true -} - -func (l *lazyObj) Hash() int { - return l.force().Hash() -} - -func (l *lazyObj) Keys() []*Term { - if l.strict != nil { - return l.strict.Keys() - } - ret := make([]*Term, 0, len(l.native)) - for k := range l.native { - ret = append(ret, StringTerm(k)) - } - sort.Sort(termSlice(ret)) - return ret -} - -func (l *lazyObj) KeysIterator() ObjectKeysIterator { - return &lazyObjKeysIterator{keys: l.Keys()} -} - -type lazyObjKeysIterator struct { - current int - keys []*Term -} - -func (ki *lazyObjKeysIterator) Next() (*Term, bool) { - if ki.current == len(ki.keys) { - return nil, false - } - ki.current++ - return ki.keys[ki.current-1], true -} - -func (l *lazyObj) Find(path Ref) (Value, error) { - if l.strict != nil { - return l.strict.Find(path) - } - if len(path) == 0 { - return l, nil - } - if p0, ok := path[0].Value.(String); ok { - if v, ok := l.cache[string(p0)]; ok { - return v.Find(path[1:]) - } - - if v, ok := l.native[string(p0)]; ok { - var converted Value - switch v := v.(type) { - case map[string]interface{}: - converted = LazyObject(v) - default: - converted = MustInterfaceToValue(v) - } - l.cache[string(p0)] = converted - return converted.Find(path[1:]) - } - } - return nil, errFindNotFound -} - -type object struct { - elems map[int]*objectElem - keys objectElemSlice - ground int // number of key and value grounds. Counting is - // required to support insert's key-value replace. - hash int - sortGuard *sync.Once // Prevents race condition around sorting. -} - -func newobject(n int) *object { - var keys objectElemSlice - if n > 0 { - keys = make(objectElemSlice, 0, n) - } - return &object{ - elems: make(map[int]*objectElem, n), - keys: keys, - ground: 0, - hash: 0, - sortGuard: new(sync.Once), - } -} - -type objectElem struct { - key *Term - value *Term - next *objectElem -} - -type objectElemSlice []*objectElem - -func (s objectElemSlice) Less(i, j int) bool { return Compare(s[i].key.Value, s[j].key.Value) < 0 } -func (s objectElemSlice) Swap(i, j int) { x := s[i]; s[i] = s[j]; s[j] = x } -func (s objectElemSlice) Len() int { return len(s) } - // Item is a helper for constructing an tuple containing two Terms // representing a key/value pair in an Object. func Item(key, value *Term) [2]*Term { - return [2]*Term{key, value} -} - -func (obj *object) sortedKeys() objectElemSlice { - obj.sortGuard.Do(func() { - sort.Sort(obj.keys) - }) - return obj.keys -} - -// Compare compares obj to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (obj *object) Compare(other Value) int { - if x, ok := other.(*lazyObj); ok { - other = x.force() - } - o1 := sortOrder(obj) - o2 := sortOrder(other) - if o1 < o2 { - return -1 - } else if o2 < o1 { - return 1 - } - a := obj - b := other.(*object) - // Ensure that keys are in canonical sorted order before use! 
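`Item` and `NewObject` are unchanged at the call site. A sketch constructing an object from key/value pairs:

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	obj := ast.NewObject(
		ast.Item(ast.StringTerm("a"), ast.IntNumberTerm(1)),
		ast.Item(ast.StringTerm("b"), ast.IntNumberTerm(2)),
	)
	fmt.Println(obj) // {"a": 1, "b": 2}
}
```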
- akeys := a.sortedKeys() - bkeys := b.sortedKeys() - minLen := len(akeys) - if len(b.keys) < len(akeys) { - minLen = len(bkeys) - } - for i := 0; i < minLen; i++ { - keysCmp := Compare(akeys[i].key, bkeys[i].key) - if keysCmp < 0 { - return -1 - } - if keysCmp > 0 { - return 1 - } - valA := akeys[i].value - valB := bkeys[i].value - valCmp := Compare(valA, valB) - if valCmp != 0 { - return valCmp - } - } - if len(akeys) < len(bkeys) { - return -1 - } - if len(bkeys) < len(akeys) { - return 1 - } - return 0 -} - -// Find returns the value at the key or undefined. -func (obj *object) Find(path Ref) (Value, error) { - if len(path) == 0 { - return obj, nil - } - value := obj.Get(path[0]) - if value == nil { - return nil, errFindNotFound - } - return value.Value.Find(path[1:]) -} - -func (obj *object) Insert(k, v *Term) { - obj.insert(k, v) -} - -// Get returns the value of k in obj if k exists, otherwise nil. -func (obj *object) Get(k *Term) *Term { - if elem := obj.get(k); elem != nil { - return elem.value - } - return nil -} - -// Hash returns the hash code for the Value. -func (obj *object) Hash() int { - return obj.hash -} - -// IsGround returns true if all of the Object key/value pairs are ground. -func (obj *object) IsGround() bool { - return obj.ground == 2*len(obj.keys) -} - -// Copy returns a deep copy of obj. -func (obj *object) Copy() Object { - cpy, _ := obj.Map(func(k, v *Term) (*Term, *Term, error) { - return k.Copy(), v.Copy(), nil - }) - cpy.(*object).hash = obj.hash - return cpy -} - -// Diff returns a new Object that contains only the key/value pairs that exist in obj. -func (obj *object) Diff(other Object) Object { - r := NewObject() - obj.Foreach(func(k, v *Term) { - if other.Get(k) == nil { - r.Insert(k, v) - } - }) - return r -} - -// Intersect returns a slice of term triplets that represent the intersection of keys -// between obj and other. For each intersecting key, the values from obj and other are included -// as the last two terms in the triplet (respectively). -func (obj *object) Intersect(other Object) [][3]*Term { - r := [][3]*Term{} - obj.Foreach(func(k, v *Term) { - if v2 := other.Get(k); v2 != nil { - r = append(r, [3]*Term{k, v, v2}) - } - }) - return r -} - -// Iter calls the function f for each key-value pair in the object. If f -// returns an error, iteration stops and the error is returned. -func (obj *object) Iter(f func(*Term, *Term) error) error { - for _, node := range obj.sortedKeys() { - if err := f(node.key, node.value); err != nil { - return err - } - } - return nil -} - -// Until calls f for each key-value pair in the object. If f returns -// true, iteration stops and Until returns true. Otherwise, return -// false. -func (obj *object) Until(f func(*Term, *Term) bool) bool { - err := obj.Iter(func(k, v *Term) error { - if f(k, v) { - return errStop - } - return nil - }) - return err != nil -} - -// Foreach calls f for each key-value pair in the object. -func (obj *object) Foreach(f func(*Term, *Term)) { - _ = obj.Iter(func(k, v *Term) error { - f(k, v) - return nil - }) // ignore error -} - -// Map returns a new Object constructed by mapping each element in the object -// using the function f. 
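Iteration visits keys in canonical sorted order, per the `sortedKeys` guard above. A sketch, assuming `MustParseTerm` from the same package:

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	obj := ast.MustParseTerm(`{"b": 2, "a": 1}`).Value.(ast.Object)

	obj.Foreach(func(k, v *ast.Term) {
		fmt.Println(k, "->", v) // "a" -> 1, then "b" -> 2: sorted key order
	})
}
```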
-func (obj *object) Map(f func(*Term, *Term) (*Term, *Term, error)) (Object, error) { - cpy := newobject(obj.Len()) - err := obj.Iter(func(k, v *Term) error { - var err error - k, v, err = f(k, v) - if err != nil { - return err - } - cpy.insert(k, v) - return nil - }) - if err != nil { - return nil, err - } - return cpy, nil -} - -// Keys returns the keys of obj. -func (obj *object) Keys() []*Term { - keys := make([]*Term, len(obj.keys)) - - for i, elem := range obj.sortedKeys() { - keys[i] = elem.key - } - - return keys -} - -// Returns an iterator over the obj's keys. -func (obj *object) KeysIterator() ObjectKeysIterator { - return newobjectKeysIterator(obj) -} - -// MarshalJSON returns JSON encoded bytes representing obj. -func (obj *object) MarshalJSON() ([]byte, error) { - sl := make([][2]*Term, obj.Len()) - for i, node := range obj.sortedKeys() { - sl[i] = Item(node.key, node.value) - } - return json.Marshal(sl) -} - -// Merge returns a new Object containing the non-overlapping keys of obj and other. If there are -// overlapping keys between obj and other, the values of associated with the keys are merged. Only -// objects can be merged with other objects. If the values cannot be merged, the second turn value -// will be false. -func (obj object) Merge(other Object) (Object, bool) { - return obj.MergeWith(other, func(v1, v2 *Term) (*Term, bool) { - obj1, ok1 := v1.Value.(Object) - obj2, ok2 := v2.Value.(Object) - if !ok1 || !ok2 { - return nil, true - } - obj3, ok := obj1.Merge(obj2) - if !ok { - return nil, true - } - return NewTerm(obj3), false - }) -} - -// MergeWith returns a new Object containing the merged keys of obj and other. -// If there are overlapping keys between obj and other, the conflictResolver -// is called. The conflictResolver can return a merged value and a boolean -// indicating if the merge has failed and should stop. -func (obj object) MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool) { - result := NewObject() - stop := obj.Until(func(k, v *Term) bool { - v2 := other.Get(k) - // The key didn't exist in other, keep the original value - if v2 == nil { - result.Insert(k, v) - return false - } - - // The key exists in both, resolve the conflict if possible - merged, stop := conflictResolver(v, v2) - if !stop { - result.Insert(k, merged) - } - return stop - }) - - if stop { - return nil, false - } - - // Copy in any values from other for keys that don't exist in obj - other.Foreach(func(k, v *Term) { - if v2 := obj.Get(k); v2 == nil { - result.Insert(k, v) - } - }) - return result, true -} - -// Filter returns a new object from values in obj where the keys are -// found in filter. Array indices for values can be specified as -// number strings. -func (obj *object) Filter(filter Object) (Object, error) { - filtered, err := filterObject(obj, filter) - if err != nil { - return nil, err - } - return filtered.(Object), nil -} - -// Len returns the number of elements in the object. -func (obj object) Len() int { - return len(obj.keys) -} - -func (obj object) String() string { - var b strings.Builder - b.WriteRune('{') - - for i, elem := range obj.sortedKeys() { - if i > 0 { - b.WriteString(", ") - } - b.WriteString(elem.key.String()) - b.WriteString(": ") - b.WriteString(elem.value.String()) - } - b.WriteRune('}') - return b.String() -} - -func (obj *object) get(k *Term) *objectElem { - hash := k.Hash() - - // This `equal` utility is duplicated and manually inlined a number of - // time in this file. 
Inlining it avoids heap allocations, so it makes - // a big performance difference: some operations like lookup become twice - // as slow without it. - var equal func(v Value) bool - - switch x := k.Value.(type) { - case Null, Boolean, String, Var: - equal = func(y Value) bool { return x == y } - case Number: - if xi, err := json.Number(x).Int64(); err == nil { - equal = func(y Value) bool { - if y, ok := y.(Number); ok { - if yi, err := json.Number(y).Int64(); err == nil { - return xi == yi - } - } - - return false - } - break - } - - // We use big.Rat for comparing big numbers. - // It replaces big.Float due to following reason: - // big.Float comes with a default precision of 64, and setting a - // larger precision results in more memory being allocated - // (regardless of the actual number we are parsing with SetString). - // - // Note: If we're so close to zero that big.Float says we are zero, do - // *not* big.Rat).SetString on the original string it'll potentially - // take very long. - var a *big.Rat - fa, ok := new(big.Float).SetString(string(x)) - if !ok { - panic("illegal value") - } - if fa.IsInt() { - if i, _ := fa.Int64(); i == 0 { - a = new(big.Rat).SetInt64(0) - } - } - if a == nil { - a, ok = new(big.Rat).SetString(string(x)) - if !ok { - panic("illegal value") - } - } - - equal = func(b Value) bool { - if bNum, ok := b.(Number); ok { - var b *big.Rat - fb, ok := new(big.Float).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - if fb.IsInt() { - if i, _ := fb.Int64(); i == 0 { - b = new(big.Rat).SetInt64(0) - } - } - if b == nil { - b, ok = new(big.Rat).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - } - - return a.Cmp(b) == 0 - } - - return false - } - default: - equal = func(y Value) bool { return Compare(x, y) == 0 } - } - - for curr := obj.elems[hash]; curr != nil; curr = curr.next { - if equal(curr.key.Value) { - return curr - } - } - return nil -} - -// NOTE(philipc): We assume a many-readers, single-writer model here. -// This method should NOT be used concurrently, or else we risk data races. -func (obj *object) insert(k, v *Term) { - hash := k.Hash() - head := obj.elems[hash] - // This `equal` utility is duplicated and manually inlined a number of - // time in this file. Inlining it avoids heap allocations, so it makes - // a big performance difference: some operations like lookup become twice - // as slow without it. - var equal func(v Value) bool - - switch x := k.Value.(type) { - case Null, Boolean, String, Var: - equal = func(y Value) bool { return x == y } - case Number: - if xi, err := json.Number(x).Int64(); err == nil { - equal = func(y Value) bool { - if y, ok := y.(Number); ok { - if yi, err := json.Number(y).Int64(); err == nil { - return xi == yi - } - } - - return false - } - break - } - - // We use big.Rat for comparing big numbers. - // It replaces big.Float due to following reason: - // big.Float comes with a default precision of 64, and setting a - // larger precision results in more memory being allocated - // (regardless of the actual number we are parsing with SetString). - // - // Note: If we're so close to zero that big.Float says we are zero, do - // *not* big.Rat).SetString on the original string it'll potentially - // take very long. 
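`Merge` recursively merges nested objects and reports failure through its second return value, as the deleted doc comments above describe. A sketch with illustrative inputs:

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	a := ast.MustParseTerm(`{"x": {"p": 1}}`).Value.(ast.Object)
	b := ast.MustParseTerm(`{"x": {"q": 2}, "y": 3}`).Value.(ast.Object)

	merged, ok := a.Merge(b)
	fmt.Println(ok)     // true: the overlapping values were both objects
	fmt.Println(merged) // {"x": {"p": 1, "q": 2}, "y": 3}
}
```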
- var a *big.Rat - fa, ok := new(big.Float).SetString(string(x)) - if !ok { - panic("illegal value") - } - if fa.IsInt() { - if i, _ := fa.Int64(); i == 0 { - a = new(big.Rat).SetInt64(0) - } - } - if a == nil { - a, ok = new(big.Rat).SetString(string(x)) - if !ok { - panic("illegal value") - } - } - - equal = func(b Value) bool { - if bNum, ok := b.(Number); ok { - var b *big.Rat - fb, ok := new(big.Float).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - if fb.IsInt() { - if i, _ := fb.Int64(); i == 0 { - b = new(big.Rat).SetInt64(0) - } - } - if b == nil { - b, ok = new(big.Rat).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - } - - return a.Cmp(b) == 0 - } - - return false - } - default: - equal = func(y Value) bool { return Compare(x, y) == 0 } - } - - for curr := head; curr != nil; curr = curr.next { - if equal(curr.key.Value) { - // The ground bit of the value may change in - // replace, hence adjust the counter per old - // and new value. - - if curr.value.IsGround() { - obj.ground-- - } - if v.IsGround() { - obj.ground++ - } - - curr.value = v - - obj.rehash() - return - } - } - elem := &objectElem{ - key: k, - value: v, - next: head, - } - obj.elems[hash] = elem - // O(1) insertion, but we'll have to re-sort the keys later. - obj.keys = append(obj.keys, elem) - // Reset the sync.Once instance. - // See https://github.com/golang/go/issues/25955 for why we do it this way. - obj.sortGuard = new(sync.Once) - obj.hash += hash + v.Hash() - - if k.IsGround() { - obj.ground++ - } - if v.IsGround() { - obj.ground++ - } -} - -func (obj *object) rehash() { - // obj.keys is considered truth, from which obj.hash and obj.elems are recalculated. - - obj.hash = 0 - obj.elems = make(map[int]*objectElem, len(obj.keys)) - - for _, elem := range obj.keys { - hash := elem.key.Hash() - obj.hash += hash + elem.value.Hash() - obj.elems[hash] = elem - } -} - -func filterObject(o Value, filter Value) (Value, error) { - if filter.Compare(Null{}) == 0 { - return o, nil - } - - filteredObj, ok := filter.(*object) - if !ok { - return nil, fmt.Errorf("invalid filter value %q, expected an object", filter) - } - - switch v := o.(type) { - case String, Number, Boolean, Null: - return o, nil - case *Array: - values := NewArray() - for i := 0; i < v.Len(); i++ { - subFilter := filteredObj.Get(StringTerm(strconv.Itoa(i))) - if subFilter != nil { - filteredValue, err := filterObject(v.Elem(i).Value, subFilter.Value) - if err != nil { - return nil, err - } - values = values.Append(NewTerm(filteredValue)) - } - } - return values, nil - case Set: - values := NewSet() - err := v.Iter(func(t *Term) error { - if filteredObj.Get(t) != nil { - filteredValue, err := filterObject(t.Value, filteredObj.Get(t).Value) - if err != nil { - return err - } - values.Add(NewTerm(filteredValue)) - } - return nil - }) - return values, err - case *object: - values := NewObject() - - iterObj := v - other := filteredObj - if v.Len() < filteredObj.Len() { - iterObj = filteredObj - other = v - } - - err := iterObj.Iter(func(key *Term, _ *Term) error { - if other.Get(key) != nil { - filteredValue, err := filterObject(v.Get(key).Value, filteredObj.Get(key).Value) - if err != nil { - return err - } - values.Insert(key, NewTerm(filteredValue)) - } - return nil - }) - return values, err - default: - return nil, fmt.Errorf("invalid object value type %q", v) - } + return v1.Item(key, value) } // NOTE(philipc): The only way to get an ObjectKeyIterator should be // from an Object. 
This ensures that the iterator can have implementation- // specific details internally, with no contracts except to the very // limited interface. -type ObjectKeysIterator interface { - Next() (*Term, bool) -} - -type objectKeysIterator struct { - obj *object - numKeys int - index int -} - -func newobjectKeysIterator(o *object) ObjectKeysIterator { - return &objectKeysIterator{ - obj: o, - numKeys: o.Len(), - index: 0, - } -} - -func (oki *objectKeysIterator) Next() (*Term, bool) { - if oki.index == oki.numKeys || oki.numKeys == 0 { - return nil, false - } - oki.index++ - return oki.obj.sortedKeys()[oki.index-1].key, true -} +type ObjectKeysIterator = v1.ObjectKeysIterator // ArrayComprehension represents an array comprehension as defined in the language. -type ArrayComprehension struct { - Term *Term `json:"term"` - Body Body `json:"body"` -} +type ArrayComprehension = v1.ArrayComprehension // ArrayComprehensionTerm creates a new Term with an ArrayComprehension value. func ArrayComprehensionTerm(term *Term, body Body) *Term { - return &Term{ - Value: &ArrayComprehension{ - Term: term, - Body: body, - }, - } -} - -// Copy returns a deep copy of ac. -func (ac *ArrayComprehension) Copy() *ArrayComprehension { - cpy := *ac - cpy.Body = ac.Body.Copy() - cpy.Term = ac.Term.Copy() - return &cpy -} - -// Equal returns true if ac is equal to other. -func (ac *ArrayComprehension) Equal(other Value) bool { - return Compare(ac, other) == 0 -} - -// Compare compares ac to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (ac *ArrayComprehension) Compare(other Value) int { - return Compare(ac, other) -} - -// Find returns the current value or a not found error. -func (ac *ArrayComprehension) Find(path Ref) (Value, error) { - if len(path) == 0 { - return ac, nil - } - return nil, errFindNotFound -} - -// Hash returns the hash code of the Value. -func (ac *ArrayComprehension) Hash() int { - return ac.Term.Hash() + ac.Body.Hash() -} - -// IsGround returns true if the Term and Body are ground. -func (ac *ArrayComprehension) IsGround() bool { - return ac.Term.IsGround() && ac.Body.IsGround() -} - -func (ac *ArrayComprehension) String() string { - return "[" + ac.Term.String() + " | " + ac.Body.String() + "]" + return v1.ArrayComprehensionTerm(term, body) } // ObjectComprehension represents an object comprehension as defined in the language. -type ObjectComprehension struct { - Key *Term `json:"key"` - Value *Term `json:"value"` - Body Body `json:"body"` -} +type ObjectComprehension = v1.ObjectComprehension // ObjectComprehensionTerm creates a new Term with an ObjectComprehension value. func ObjectComprehensionTerm(key, value *Term, body Body) *Term { - return &Term{ - Value: &ObjectComprehension{ - Key: key, - Value: value, - Body: body, - }, - } -} - -// Copy returns a deep copy of oc. -func (oc *ObjectComprehension) Copy() *ObjectComprehension { - cpy := *oc - cpy.Body = oc.Body.Copy() - cpy.Key = oc.Key.Copy() - cpy.Value = oc.Value.Copy() - return &cpy -} - -// Equal returns true if oc is equal to other. -func (oc *ObjectComprehension) Equal(other Value) bool { - return Compare(oc, other) == 0 -} - -// Compare compares oc to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (oc *ObjectComprehension) Compare(other Value) int { - return Compare(oc, other) -} - -// Find returns the current value or a not found error. 
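The three comprehension types become aliases too, so type assertions written against the v0 names keep working. A sketch; the Rego snippet is illustrative:

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	t := ast.MustParseTerm(`[x | x := [1, 2, 3][_]]`)

	_, ok := t.Value.(*ast.ArrayComprehension) // v0 name, v1 type
	fmt.Println(ok)                            // true
}
```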
-func (oc *ObjectComprehension) Find(path Ref) (Value, error) { - if len(path) == 0 { - return oc, nil - } - return nil, errFindNotFound -} - -// Hash returns the hash code of the Value. -func (oc *ObjectComprehension) Hash() int { - return oc.Key.Hash() + oc.Value.Hash() + oc.Body.Hash() -} - -// IsGround returns true if the Key, Value and Body are ground. -func (oc *ObjectComprehension) IsGround() bool { - return oc.Key.IsGround() && oc.Value.IsGround() && oc.Body.IsGround() -} - -func (oc *ObjectComprehension) String() string { - return "{" + oc.Key.String() + ": " + oc.Value.String() + " | " + oc.Body.String() + "}" + return v1.ObjectComprehensionTerm(key, value, body) } // SetComprehension represents a set comprehension as defined in the language. -type SetComprehension struct { - Term *Term `json:"term"` - Body Body `json:"body"` -} +type SetComprehension = v1.SetComprehension // SetComprehensionTerm creates a new Term with an SetComprehension value. func SetComprehensionTerm(term *Term, body Body) *Term { - return &Term{ - Value: &SetComprehension{ - Term: term, - Body: body, - }, - } -} - -// Copy returns a deep copy of sc. -func (sc *SetComprehension) Copy() *SetComprehension { - cpy := *sc - cpy.Body = sc.Body.Copy() - cpy.Term = sc.Term.Copy() - return &cpy -} - -// Equal returns true if sc is equal to other. -func (sc *SetComprehension) Equal(other Value) bool { - return Compare(sc, other) == 0 -} - -// Compare compares sc to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (sc *SetComprehension) Compare(other Value) int { - return Compare(sc, other) -} - -// Find returns the current value or a not found error. -func (sc *SetComprehension) Find(path Ref) (Value, error) { - if len(path) == 0 { - return sc, nil - } - return nil, errFindNotFound -} - -// Hash returns the hash code of the Value. -func (sc *SetComprehension) Hash() int { - return sc.Term.Hash() + sc.Body.Hash() -} - -// IsGround returns true if the Term and Body are ground. -func (sc *SetComprehension) IsGround() bool { - return sc.Term.IsGround() && sc.Body.IsGround() -} - -func (sc *SetComprehension) String() string { - return "{" + sc.Term.String() + " | " + sc.Body.String() + "}" + return v1.SetComprehensionTerm(term, body) } // Call represents as function call in the language. -type Call []*Term +type Call = v1.Call // CallTerm returns a new Term with a Call value defined by terms. The first // term is the operator and the rest are operands. func CallTerm(terms ...*Term) *Term { - return NewTerm(Call(terms)) -} - -// Copy returns a deep copy of c. -func (c Call) Copy() Call { - return termSliceCopy(c) -} - -// Compare compares c to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (c Call) Compare(other Value) int { - return Compare(c, other) -} - -// Find returns the current value or a not found error. -func (c Call) Find(Ref) (Value, error) { - return nil, errFindNotFound -} - -// Hash returns the hash code for the Value. -func (c Call) Hash() int { - return termSliceHash(c) -} - -// IsGround returns true if the Value is ground. -func (c Call) IsGround() bool { - return termSliceIsGround(c) -} - -// MakeExpr returns an ew Expr from this call. 
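`CallTerm` still takes the operator first and the operands after. A hand-rolled sketch; real callers usually obtain operator refs from builtin declarations rather than constructing them from a `Var` as done here:

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	// Operator first (as a ref), then operands.
	call := ast.CallTerm(
		ast.RefTerm(ast.VarTerm("count")),
		ast.MustParseTerm("input.users"),
	)
	fmt.Println(call) // count(input.users)
}
```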
-func (c Call) MakeExpr(output *Term) *Expr { - terms := []*Term(c) - return NewExpr(append(terms, output)) -} - -func (c Call) String() string { - args := make([]string, len(c)-1) - for i := 1; i < len(c); i++ { - args[i-1] = c[i].String() - } - return fmt.Sprintf("%v(%v)", c[0], strings.Join(args, ", ")) -} - -func termSliceCopy(a []*Term) []*Term { - cpy := make([]*Term, len(a)) - for i := range a { - cpy[i] = a[i].Copy() - } - return cpy -} - -func termSliceEqual(a, b []*Term) bool { - if len(a) == len(b) { - for i := range a { - if !a[i].Equal(b[i]) { - return false - } - } - return true - } - return false -} - -func termSliceHash(a []*Term) int { - var hash int - for _, v := range a { - hash += v.Value.Hash() - } - return hash -} - -func termSliceIsGround(a []*Term) bool { - for _, v := range a { - if !v.IsGround() { - return false - } - } - return true -} - -// NOTE(tsandall): The unmarshalling errors in these functions are not -// helpful for callers because they do not identify the source of the -// unmarshalling error. Because OPA doesn't accept JSON describing ASTs -// from callers, this is acceptable (for now). If that changes in the future, -// the error messages should be revisited. The current approach focuses -// on the happy path and treats all errors the same. If better error -// reporting is needed, the error paths will need to be fleshed out. - -func unmarshalBody(b []interface{}) (Body, error) { - buf := Body{} - for _, e := range b { - if m, ok := e.(map[string]interface{}); ok { - expr := &Expr{} - if err := unmarshalExpr(expr, m); err == nil { - buf = append(buf, expr) - continue - } - } - goto unmarshal_error - } - return buf, nil -unmarshal_error: - return nil, fmt.Errorf("ast: unable to unmarshal body") -} - -func unmarshalExpr(expr *Expr, v map[string]interface{}) error { - if x, ok := v["negated"]; ok { - if b, ok := x.(bool); ok { - expr.Negated = b - } else { - return fmt.Errorf("ast: unable to unmarshal negated field with type: %T (expected true or false)", v["negated"]) - } - } - if generatedRaw, ok := v["generated"]; ok { - if b, ok := generatedRaw.(bool); ok { - expr.Generated = b - } else { - return fmt.Errorf("ast: unable to unmarshal generated field with type: %T (expected true or false)", v["generated"]) - } - } - - if err := unmarshalExprIndex(expr, v); err != nil { - return err - } - switch ts := v["terms"].(type) { - case map[string]interface{}: - t, err := unmarshalTerm(ts) - if err != nil { - return err - } - expr.Terms = t - case []interface{}: - terms, err := unmarshalTermSlice(ts) - if err != nil { - return err - } - expr.Terms = terms - default: - return fmt.Errorf(`ast: unable to unmarshal terms field with type: %T (expected {"value": ..., "type": ...} or [{"value": ..., "type": ...}, ...])`, v["terms"]) - } - if x, ok := v["with"]; ok { - if sl, ok := x.([]interface{}); ok { - ws := make([]*With, len(sl)) - for i := range sl { - var err error - ws[i], err = unmarshalWith(sl[i]) - if err != nil { - return err - } - } - expr.With = ws - } - } - if loc, ok := v["location"].(map[string]interface{}); ok { - expr.Location = &Location{} - if err := unmarshalLocation(expr.Location, loc); err != nil { - return err - } - } - return nil -} - -func unmarshalLocation(loc *Location, v map[string]interface{}) error { - if x, ok := v["file"]; ok { - if s, ok := x.(string); ok { - loc.File = s - } else { - return fmt.Errorf("ast: unable to unmarshal file field with type: %T (expected string)", v["file"]) - } - } - if x, ok := v["row"]; ok { - if n, ok := 
x.(json.Number); ok { - i64, err := n.Int64() - if err != nil { - return err - } - loc.Row = int(i64) - } else { - return fmt.Errorf("ast: unable to unmarshal row field with type: %T (expected number)", v["row"]) - } - } - if x, ok := v["col"]; ok { - if n, ok := x.(json.Number); ok { - i64, err := n.Int64() - if err != nil { - return err - } - loc.Col = int(i64) - } else { - return fmt.Errorf("ast: unable to unmarshal col field with type: %T (expected number)", v["col"]) - } - } - - return nil -} - -func unmarshalExprIndex(expr *Expr, v map[string]interface{}) error { - if x, ok := v["index"]; ok { - if n, ok := x.(json.Number); ok { - i, err := n.Int64() - if err == nil { - expr.Index = int(i) - return nil - } - } - } - return fmt.Errorf("ast: unable to unmarshal index field with type: %T (expected integer)", v["index"]) -} - -func unmarshalTerm(m map[string]interface{}) (*Term, error) { - var term Term - - v, err := unmarshalValue(m) - if err != nil { - return nil, err - } - term.Value = v - - if loc, ok := m["location"].(map[string]interface{}); ok { - term.Location = &Location{} - if err := unmarshalLocation(term.Location, loc); err != nil { - return nil, err - } - } - - return &term, nil -} - -func unmarshalTermSlice(s []interface{}) ([]*Term, error) { - buf := []*Term{} - for _, x := range s { - if m, ok := x.(map[string]interface{}); ok { - t, err := unmarshalTerm(m) - if err == nil { - buf = append(buf, t) - continue - } - return nil, err - } - return nil, fmt.Errorf("ast: unable to unmarshal term") - } - return buf, nil -} - -func unmarshalTermSliceValue(d map[string]interface{}) ([]*Term, error) { - if s, ok := d["value"].([]interface{}); ok { - return unmarshalTermSlice(s) - } - return nil, fmt.Errorf(`ast: unable to unmarshal term (expected {"value": [...], "type": ...} where type is one of: ref, array, or set)`) -} - -func unmarshalWith(i interface{}) (*With, error) { - if m, ok := i.(map[string]interface{}); ok { - tgt, _ := m["target"].(map[string]interface{}) - target, err := unmarshalTerm(tgt) - if err == nil { - val, _ := m["value"].(map[string]interface{}) - value, err := unmarshalTerm(val) - if err == nil { - return &With{ - Target: target, - Value: value, - }, nil - } - return nil, err - } - return nil, err - } - return nil, fmt.Errorf(`ast: unable to unmarshal with modifier (expected {"target": {...}, "value": {...}})`) -} - -func unmarshalValue(d map[string]interface{}) (Value, error) { - v := d["value"] - switch d["type"] { - case "null": - return Null{}, nil - case "boolean": - if b, ok := v.(bool); ok { - return Boolean(b), nil - } - case "number": - if n, ok := v.(json.Number); ok { - return Number(n), nil - } - case "string": - if s, ok := v.(string); ok { - return String(s), nil - } - case "var": - if s, ok := v.(string); ok { - return Var(s), nil - } - case "ref": - if s, err := unmarshalTermSliceValue(d); err == nil { - return Ref(s), nil - } - case "array": - if s, err := unmarshalTermSliceValue(d); err == nil { - return NewArray(s...), nil - } - case "set": - if s, err := unmarshalTermSliceValue(d); err == nil { - set := NewSet() - for _, x := range s { - set.Add(x) - } - return set, nil - } - case "object": - if s, ok := v.([]interface{}); ok { - buf := NewObject() - for _, x := range s { - if i, ok := x.([]interface{}); ok && len(i) == 2 { - p, err := unmarshalTermSlice(i) - if err == nil { - buf.Insert(p[0], p[1]) - continue - } - } - goto unmarshal_error - } - return buf, nil - } - case "arraycomprehension", "setcomprehension": - if m, ok := 
v.(map[string]interface{}); ok { - t, ok := m["term"].(map[string]interface{}) - if !ok { - goto unmarshal_error - } - - term, err := unmarshalTerm(t) - if err != nil { - goto unmarshal_error - } - - b, ok := m["body"].([]interface{}) - if !ok { - goto unmarshal_error - } - - body, err := unmarshalBody(b) - if err != nil { - goto unmarshal_error - } - - if d["type"] == "arraycomprehension" { - return &ArrayComprehension{Term: term, Body: body}, nil - } - return &SetComprehension{Term: term, Body: body}, nil - } - case "objectcomprehension": - if m, ok := v.(map[string]interface{}); ok { - k, ok := m["key"].(map[string]interface{}) - if !ok { - goto unmarshal_error - } - - key, err := unmarshalTerm(k) - if err != nil { - goto unmarshal_error - } - - v, ok := m["value"].(map[string]interface{}) - if !ok { - goto unmarshal_error - } - - value, err := unmarshalTerm(v) - if err != nil { - goto unmarshal_error - } - - b, ok := m["body"].([]interface{}) - if !ok { - goto unmarshal_error - } - - body, err := unmarshalBody(b) - if err != nil { - goto unmarshal_error - } - - return &ObjectComprehension{Key: key, Value: value, Body: body}, nil - } - case "call": - if s, err := unmarshalTermSliceValue(d); err == nil { - return Call(s), nil - } - } -unmarshal_error: - return nil, fmt.Errorf("ast: unable to unmarshal term") + return v1.CallTerm(terms...) } diff --git a/vendor/github.com/open-policy-agent/opa/ast/transform.go b/vendor/github.com/open-policy-agent/opa/ast/transform.go index 391a1648606..cfb137813f0 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/transform.go +++ b/vendor/github.com/open-policy-agent/opa/ast/transform.go @@ -5,427 +5,42 @@ package ast import ( - "fmt" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // Transformer defines the interface for transforming AST elements. If the // transformer returns nil and does not indicate an error, the AST element will // be set to nil and no transformations will be applied to children of the // element. -type Transformer interface { - Transform(interface{}) (interface{}, error) -} +type Transformer = v1.Transformer // Transform iterates the AST and calls the Transform function on the // Transformer t for x before recursing. 
func Transform(t Transformer, x interface{}) (interface{}, error) { - - if term, ok := x.(*Term); ok { - return Transform(t, term.Value) - } - - y, err := t.Transform(x) - if err != nil { - return x, err - } - - if y == nil { - return nil, nil - } - - var ok bool - switch y := y.(type) { - case *Module: - p, err := Transform(t, y.Package) - if err != nil { - return nil, err - } - if y.Package, ok = p.(*Package); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y.Package, p) - } - for i := range y.Imports { - imp, err := Transform(t, y.Imports[i]) - if err != nil { - return nil, err - } - if y.Imports[i], ok = imp.(*Import); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y.Imports[i], imp) - } - } - for i := range y.Rules { - rule, err := Transform(t, y.Rules[i]) - if err != nil { - return nil, err - } - if y.Rules[i], ok = rule.(*Rule); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y.Rules[i], rule) - } - } - for i := range y.Annotations { - a, err := Transform(t, y.Annotations[i]) - if err != nil { - return nil, err - } - if y.Annotations[i], ok = a.(*Annotations); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y.Annotations[i], a) - } - } - for i := range y.Comments { - comment, err := Transform(t, y.Comments[i]) - if err != nil { - return nil, err - } - if y.Comments[i], ok = comment.(*Comment); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y.Comments[i], comment) - } - } - return y, nil - case *Package: - ref, err := Transform(t, y.Path) - if err != nil { - return nil, err - } - if y.Path, ok = ref.(Ref); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y.Path, ref) - } - return y, nil - case *Import: - y.Path, err = transformTerm(t, y.Path) - if err != nil { - return nil, err - } - if y.Alias, err = transformVar(t, y.Alias); err != nil { - return nil, err - } - return y, nil - case *Rule: - if y.Head, err = transformHead(t, y.Head); err != nil { - return nil, err - } - if y.Body, err = transformBody(t, y.Body); err != nil { - return nil, err - } - if y.Else != nil { - rule, err := Transform(t, y.Else) - if err != nil { - return nil, err - } - if y.Else, ok = rule.(*Rule); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y.Else, rule) - } - } - return y, nil - case *Head: - if y.Reference, err = transformRef(t, y.Reference); err != nil { - return nil, err - } - if y.Name, err = transformVar(t, y.Name); err != nil { - return nil, err - } - if y.Args, err = transformArgs(t, y.Args); err != nil { - return nil, err - } - if y.Key != nil { - if y.Key, err = transformTerm(t, y.Key); err != nil { - return nil, err - } - } - if y.Value != nil { - if y.Value, err = transformTerm(t, y.Value); err != nil { - return nil, err - } - } - return y, nil - case Args: - for i := range y { - if y[i], err = transformTerm(t, y[i]); err != nil { - return nil, err - } - } - return y, nil - case Body: - for i, e := range y { - e, err := Transform(t, e) - if err != nil { - return nil, err - } - if y[i], ok = e.(*Expr); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y[i], e) - } - } - return y, nil - case *Expr: - switch ts := y.Terms.(type) { - case *SomeDecl: - decl, err := Transform(t, ts) - if err != nil { - return nil, err - } - if y.Terms, ok = decl.(*SomeDecl); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y, decl) - } - return y, nil - case []*Term: - for i := range ts { - if ts[i], err = transformTerm(t, ts[i]); err != nil { - return nil, err - } - } - case *Term: - if 
y.Terms, err = transformTerm(t, ts); err != nil { - return nil, err - } - case *Every: - if ts.Key != nil { - ts.Key, err = transformTerm(t, ts.Key) - if err != nil { - return nil, err - } - } - ts.Value, err = transformTerm(t, ts.Value) - if err != nil { - return nil, err - } - ts.Domain, err = transformTerm(t, ts.Domain) - if err != nil { - return nil, err - } - ts.Body, err = transformBody(t, ts.Body) - if err != nil { - return nil, err - } - y.Terms = ts - } - for i, w := range y.With { - w, err := Transform(t, w) - if err != nil { - return nil, err - } - if y.With[i], ok = w.(*With); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y.With[i], w) - } - } - return y, nil - case *With: - if y.Target, err = transformTerm(t, y.Target); err != nil { - return nil, err - } - if y.Value, err = transformTerm(t, y.Value); err != nil { - return nil, err - } - return y, nil - case Ref: - for i, term := range y { - if y[i], err = transformTerm(t, term); err != nil { - return nil, err - } - } - return y, nil - case *object: - return y.Map(func(k, v *Term) (*Term, *Term, error) { - k, err := transformTerm(t, k) - if err != nil { - return nil, nil, err - } - v, err = transformTerm(t, v) - if err != nil { - return nil, nil, err - } - return k, v, nil - }) - case *Array: - for i := 0; i < y.Len(); i++ { - v, err := transformTerm(t, y.Elem(i)) - if err != nil { - return nil, err - } - y.set(i, v) - } - return y, nil - case Set: - y, err = y.Map(func(term *Term) (*Term, error) { - return transformTerm(t, term) - }) - if err != nil { - return nil, err - } - return y, nil - case *ArrayComprehension: - if y.Term, err = transformTerm(t, y.Term); err != nil { - return nil, err - } - if y.Body, err = transformBody(t, y.Body); err != nil { - return nil, err - } - return y, nil - case *ObjectComprehension: - if y.Key, err = transformTerm(t, y.Key); err != nil { - return nil, err - } - if y.Value, err = transformTerm(t, y.Value); err != nil { - return nil, err - } - if y.Body, err = transformBody(t, y.Body); err != nil { - return nil, err - } - return y, nil - case *SetComprehension: - if y.Term, err = transformTerm(t, y.Term); err != nil { - return nil, err - } - if y.Body, err = transformBody(t, y.Body); err != nil { - return nil, err - } - return y, nil - case Call: - for i := range y { - if y[i], err = transformTerm(t, y[i]); err != nil { - return nil, err - } - } - return y, nil - default: - return y, nil - } + return v1.Transform(t, x) } // TransformRefs calls the function f on all references under x. func TransformRefs(x interface{}, f func(Ref) (Value, error)) (interface{}, error) { - t := &GenericTransformer{func(x interface{}) (interface{}, error) { - if r, ok := x.(Ref); ok { - return f(r) - } - return x, nil - }} - return Transform(t, x) + return v1.TransformRefs(x, f) } // TransformVars calls the function f on all vars under x. func TransformVars(x interface{}, f func(Var) (Value, error)) (interface{}, error) { - t := &GenericTransformer{func(x interface{}) (interface{}, error) { - if v, ok := x.(Var); ok { - return f(v) - } - return x, nil - }} - return Transform(t, x) + return v1.TransformVars(x, f) } // TransformComprehensions calls the function f on all comprehensions under x.
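The transform helpers delegate to v1 the same way. A hedged sketch of ast.TransformRefs rewriting one ref prefix (the input.user and input.subject names are invented for illustration; assumes an import of github.com/open-policy-agent/opa/ast):

func rewriteUserRefs() (interface{}, error) {
	body := ast.MustParseBody(`input.user == "admin"`)
	return ast.TransformRefs(body, func(r ast.Ref) (ast.Value, error) {
		if r.HasPrefix(ast.MustParseRef("input.user")) {
			// Swap the prefix, keeping any trailing path segments.
			return ast.MustParseRef("input.subject").Concat(r[2:]), nil
		}
		return r, nil
	})
}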
func TransformComprehensions(x interface{}, f func(interface{}) (Value, error)) (interface{}, error) { - t := &GenericTransformer{func(x interface{}) (interface{}, error) { - switch x := x.(type) { - case *ArrayComprehension: - return f(x) - case *SetComprehension: - return f(x) - case *ObjectComprehension: - return f(x) - } - return x, nil - }} - return Transform(t, x) + return v1.TransformComprehensions(x, f) } // GenericTransformer implements the Transformer interface to provide a utility // to transform AST nodes using a closure. -type GenericTransformer struct { - f func(interface{}) (interface{}, error) -} +type GenericTransformer = v1.GenericTransformer // NewGenericTransformer returns a new GenericTransformer that will transform // AST nodes using the function f. func NewGenericTransformer(f func(x interface{}) (interface{}, error)) *GenericTransformer { - return &GenericTransformer{ - f: f, - } -} - -// Transform calls the function f on the GenericTransformer. -func (t *GenericTransformer) Transform(x interface{}) (interface{}, error) { - return t.f(x) -} - -func transformHead(t Transformer, head *Head) (*Head, error) { - y, err := Transform(t, head) - if err != nil { - return nil, err - } - h, ok := y.(*Head) - if !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", head, y) - } - return h, nil -} - -func transformArgs(t Transformer, args Args) (Args, error) { - y, err := Transform(t, args) - if err != nil { - return nil, err - } - a, ok := y.(Args) - if !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", args, y) - } - return a, nil -} - -func transformBody(t Transformer, body Body) (Body, error) { - y, err := Transform(t, body) - if err != nil { - return nil, err - } - r, ok := y.(Body) - if !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", body, y) - } - return r, nil -} - -func transformTerm(t Transformer, term *Term) (*Term, error) { - v, err := transformValue(t, term.Value) - if err != nil { - return nil, err - } - r := &Term{ - Value: v, - Location: term.Location, - } - return r, nil -} - -func transformValue(t Transformer, v Value) (Value, error) { - v1, err := Transform(t, v) - if err != nil { - return nil, err - } - r, ok := v1.(Value) - if !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", v, v1) - } - return r, nil -} - -func transformVar(t Transformer, v Var) (Var, error) { - v1, err := Transform(t, v) - if err != nil { - return "", err - } - r, ok := v1.(Var) - if !ok { - return "", fmt.Errorf("illegal transform: %T != %T", v, v1) - } - return r, nil -} - -func transformRef(t Transformer, r Ref) (Ref, error) { - r1, err := Transform(t, r) - if err != nil { - return nil, err - } - r2, ok := r1.(Ref) - if !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", r, r2) - } - return r2, nil + return v1.NewGenericTransformer(f) } diff --git a/vendor/github.com/open-policy-agent/opa/ast/unify.go b/vendor/github.com/open-policy-agent/opa/ast/unify.go index 60244974a92..3cb260272aa 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/unify.go +++ b/vendor/github.com/open-policy-agent/opa/ast/unify.go @@ -4,232 +4,11 @@ package ast -func isRefSafe(ref Ref, safe VarSet) bool { - switch head := ref[0].Value.(type) { - case Var: - return safe.Contains(head) - case Call: - return isCallSafe(head, safe) - default: - for v := range ref[0].Vars() { - if !safe.Contains(v) { - return false - } - } - return true - } -} - -func isCallSafe(call Call, safe VarSet) bool { - vis := NewVarVisitor().WithParams(SafetyCheckVisitorParams) - 
vis.Walk(call) - unsafe := vis.Vars().Diff(safe) - return len(unsafe) == 0 -} +import v1 "github.com/open-policy-agent/opa/v1/ast" // Unify returns a set of variables that will be unified when the equality expression defined by // terms a and b is evaluated. The unifier assumes that variables in the VarSet safe are already // unified. func Unify(safe VarSet, a *Term, b *Term) VarSet { - u := &unifier{ - safe: safe, - unified: VarSet{}, - unknown: map[Var]VarSet{}, - } - u.unify(a, b) - return u.unified -} - -type unifier struct { - safe VarSet - unified VarSet - unknown map[Var]VarSet -} - -func (u *unifier) isSafe(x Var) bool { - return u.safe.Contains(x) || u.unified.Contains(x) -} - -func (u *unifier) unify(a *Term, b *Term) { - - switch a := a.Value.(type) { - - case Var: - switch b := b.Value.(type) { - case Var: - if u.isSafe(b) { - u.markSafe(a) - } else if u.isSafe(a) { - u.markSafe(b) - } else { - u.markUnknown(a, b) - u.markUnknown(b, a) - } - case *Array, Object: - u.unifyAll(a, b) - case Ref: - if isRefSafe(b, u.safe) { - u.markSafe(a) - } - case Call: - if isCallSafe(b, u.safe) { - u.markSafe(a) - } - default: - u.markSafe(a) - } - - case Ref: - if isRefSafe(a, u.safe) { - switch b := b.Value.(type) { - case Var: - u.markSafe(b) - case *Array, Object: - u.markAllSafe(b) - } - } - - case Call: - if isCallSafe(a, u.safe) { - switch b := b.Value.(type) { - case Var: - u.markSafe(b) - case *Array, Object: - u.markAllSafe(b) - } - } - - case *ArrayComprehension: - switch b := b.Value.(type) { - case Var: - u.markSafe(b) - case *Array: - u.markAllSafe(b) - } - case *ObjectComprehension: - switch b := b.Value.(type) { - case Var: - u.markSafe(b) - case *object: - u.markAllSafe(b) - } - case *SetComprehension: - switch b := b.Value.(type) { - case Var: - u.markSafe(b) - } - - case *Array: - switch b := b.Value.(type) { - case Var: - u.unifyAll(b, a) - case *ArrayComprehension, *ObjectComprehension, *SetComprehension: - u.markAllSafe(a) - case Ref: - if isRefSafe(b, u.safe) { - u.markAllSafe(a) - } - case Call: - if isCallSafe(b, u.safe) { - u.markAllSafe(a) - } - case *Array: - if a.Len() == b.Len() { - for i := 0; i < a.Len(); i++ { - u.unify(a.Elem(i), b.Elem(i)) - } - } - } - - case *object: - switch b := b.Value.(type) { - case Var: - u.unifyAll(b, a) - case Ref: - if isRefSafe(b, u.safe) { - u.markAllSafe(a) - } - case Call: - if isCallSafe(b, u.safe) { - u.markAllSafe(a) - } - case *object: - if a.Len() == b.Len() { - _ = a.Iter(func(k, v *Term) error { - if v2 := b.Get(k); v2 != nil { - u.unify(v, v2) - } - return nil - }) // impossible to return error - } - } - - default: - switch b := b.Value.(type) { - case Var: - u.markSafe(b) - } - } -} - -func (u *unifier) markAllSafe(x Value) { - vis := u.varVisitor() - vis.Walk(x) - for v := range vis.Vars() { - u.markSafe(v) - } -} - -func (u *unifier) markSafe(x Var) { - u.unified.Add(x) - - // Add dependencies of 'x' to safe set - vs := u.unknown[x] - delete(u.unknown, x) - for v := range vs { - u.markSafe(v) - } - - // Add dependants of 'x' to safe set if they have no more - // dependencies. 
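Unify's observable behaviour is unchanged by the delegation: for the equality x = [y, 1] with y already safe, x becomes unified. A minimal sketch (same import assumption as above):

func boundByEquality() ast.VarSet {
	safe := ast.NewVarSet(ast.Var("y"))
	// Unify reports which variables the equality x = [y, 1] would bind;
	// here y is already safe, so the result contains x.
	return ast.Unify(safe, ast.VarTerm("x"),
		ast.ArrayTerm(ast.VarTerm("y"), ast.IntNumberTerm(1)))
}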
- for v, deps := range u.unknown { - if deps.Contains(x) { - delete(deps, x) - if len(deps) == 0 { - u.markSafe(v) - } - } - } -} - -func (u *unifier) markUnknown(a, b Var) { - if _, ok := u.unknown[a]; !ok { - u.unknown[a] = NewVarSet() - } - u.unknown[a].Add(b) -} - -func (u *unifier) unifyAll(a Var, b Value) { - if u.isSafe(a) { - u.markAllSafe(b) - } else { - vis := u.varVisitor() - vis.Walk(b) - unsafe := vis.Vars().Diff(u.safe).Diff(u.unified) - if len(unsafe) == 0 { - u.markSafe(a) - } else { - for v := range unsafe { - u.markUnknown(a, v) - } - } - } -} - -func (u *unifier) varVisitor() *VarVisitor { - return NewVarVisitor().WithParams(VarVisitorParams{ - SkipRefHead: true, - SkipObjectKeys: true, - SkipClosures: true, - }) + return v1.Unify(safe, a, b) } diff --git a/vendor/github.com/open-policy-agent/opa/ast/varset.go b/vendor/github.com/open-policy-agent/opa/ast/varset.go index 14f531494b1..9e7db8efdad 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/varset.go +++ b/vendor/github.com/open-policy-agent/opa/ast/varset.go @@ -5,96 +5,13 @@ package ast import ( - "fmt" - "sort" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // VarSet represents a set of variables. -type VarSet map[Var]struct{} +type VarSet = v1.VarSet // NewVarSet returns a new VarSet containing the specified variables. func NewVarSet(vs ...Var) VarSet { - s := VarSet{} - for _, v := range vs { - s.Add(v) - } - return s -} - -// Add updates the set to include the variable "v". -func (s VarSet) Add(v Var) { - s[v] = struct{}{} -} - -// Contains returns true if the set contains the variable "v". -func (s VarSet) Contains(v Var) bool { - _, ok := s[v] - return ok -} - -// Copy returns a shallow copy of the VarSet. -func (s VarSet) Copy() VarSet { - cpy := VarSet{} - for v := range s { - cpy.Add(v) - } - return cpy -} - -// Diff returns a VarSet containing variables in s that are not in vs. -func (s VarSet) Diff(vs VarSet) VarSet { - r := VarSet{} - for v := range s { - if !vs.Contains(v) { - r.Add(v) - } - } - return r -} - -// Equal returns true if s contains exactly the same elements as vs. -func (s VarSet) Equal(vs VarSet) bool { - if len(s.Diff(vs)) > 0 { - return false - } - return len(vs.Diff(s)) == 0 -} - -// Intersect returns a VarSet containing variables in s that are in vs. -func (s VarSet) Intersect(vs VarSet) VarSet { - r := VarSet{} - for v := range s { - if vs.Contains(v) { - r.Add(v) - } - } - return r -} - -// Sorted returns a sorted slice of vars from s. -func (s VarSet) Sorted() []Var { - sorted := make([]Var, 0, len(s)) - for v := range s { - sorted = append(sorted, v) - } - sort.Slice(sorted, func(i, j int) bool { - return sorted[i].Compare(sorted[j]) < 0 - }) - return sorted -} - -// Update merges the other VarSet into this VarSet. -func (s VarSet) Update(vs VarSet) { - for v := range vs { - s.Add(v) - } -} - -func (s VarSet) String() string { - tmp := make([]string, 0, len(s)) - for v := range s { - tmp = append(tmp, string(v)) - } - sort.Strings(tmp) - return fmt.Sprintf("%v", tmp) + return v1.NewVarSet(vs...) } diff --git a/vendor/github.com/open-policy-agent/opa/ast/visit.go b/vendor/github.com/open-policy-agent/opa/ast/visit.go index d83c31149ed..94823c6cc77 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/visit.go +++ b/vendor/github.com/open-policy-agent/opa/ast/visit.go @@ -4,780 +4,120 @@ package ast +import v1 "github.com/open-policy-agent/opa/v1/ast" + // Visitor defines the interface for iterating AST elements. 
The Visit function // can return a Visitor w which will be used to visit the children of the AST // element v. If the Visit function returns nil, the children will not be // visited. // Deprecated: use GenericVisitor or another visitor implementation -type Visitor interface { - Visit(v interface{}) (w Visitor) -} +type Visitor = v1.Visitor // BeforeAndAfterVisitor wraps Visitor to provide hooks for being called before // and after the AST has been visited. // Deprecated: use GenericVisitor or another visitor implementation -type BeforeAndAfterVisitor interface { - Visitor - Before(x interface{}) - After(x interface{}) -} +type BeforeAndAfterVisitor = v1.BeforeAndAfterVisitor // Walk iterates the AST by calling the Visit function on the Visitor // v for x before recursing. // Deprecated: use GenericVisitor.Walk func Walk(v Visitor, x interface{}) { - if bav, ok := v.(BeforeAndAfterVisitor); !ok { - walk(v, x) - } else { - bav.Before(x) - defer bav.After(x) - walk(bav, x) - } + v1.Walk(v, x) } // WalkBeforeAndAfter iterates the AST by calling the Visit function on the // Visitor v for x before recursing. // Deprecated: use GenericVisitor.Walk func WalkBeforeAndAfter(v BeforeAndAfterVisitor, x interface{}) { - Walk(v, x) -} - -func walk(v Visitor, x interface{}) { - w := v.Visit(x) - if w == nil { - return - } - switch x := x.(type) { - case *Module: - Walk(w, x.Package) - for i := range x.Imports { - Walk(w, x.Imports[i]) - } - for i := range x.Rules { - Walk(w, x.Rules[i]) - } - for i := range x.Annotations { - Walk(w, x.Annotations[i]) - } - for i := range x.Comments { - Walk(w, x.Comments[i]) - } - case *Package: - Walk(w, x.Path) - case *Import: - Walk(w, x.Path) - Walk(w, x.Alias) - case *Rule: - Walk(w, x.Head) - Walk(w, x.Body) - if x.Else != nil { - Walk(w, x.Else) - } - case *Head: - Walk(w, x.Name) - Walk(w, x.Args) - if x.Key != nil { - Walk(w, x.Key) - } - if x.Value != nil { - Walk(w, x.Value) - } - case Body: - for i := range x { - Walk(w, x[i]) - } - case Args: - for i := range x { - Walk(w, x[i]) - } - case *Expr: - switch ts := x.Terms.(type) { - case *Term, *SomeDecl, *Every: - Walk(w, ts) - case []*Term: - for i := range ts { - Walk(w, ts[i]) - } - } - for i := range x.With { - Walk(w, x.With[i]) - } - case *With: - Walk(w, x.Target) - Walk(w, x.Value) - case *Term: - Walk(w, x.Value) - case Ref: - for i := range x { - Walk(w, x[i]) - } - case *object: - x.Foreach(func(k, vv *Term) { - Walk(w, k) - Walk(w, vv) - }) - case *Array: - x.Foreach(func(t *Term) { - Walk(w, t) - }) - case Set: - x.Foreach(func(t *Term) { - Walk(w, t) - }) - case *ArrayComprehension: - Walk(w, x.Term) - Walk(w, x.Body) - case *ObjectComprehension: - Walk(w, x.Key) - Walk(w, x.Value) - Walk(w, x.Body) - case *SetComprehension: - Walk(w, x.Term) - Walk(w, x.Body) - case Call: - for i := range x { - Walk(w, x[i]) - } - case *Every: - if x.Key != nil { - Walk(w, x.Key) - } - Walk(w, x.Value) - Walk(w, x.Domain) - Walk(w, x.Body) - case *SomeDecl: - for i := range x.Symbols { - Walk(w, x.Symbols[i]) - } - } + v1.WalkBeforeAndAfter(v, x) } // WalkVars calls the function f on all vars under x. If the function f // returns true, AST nodes under the last node will not be visited. func WalkVars(x interface{}, f func(Var) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - if v, ok := x.(Var); ok { - return f(v) - } - return false - }} - vis.Walk(x) + v1.WalkVars(x, f) } // WalkClosures calls the function f on all closures under x. 
If the function f // returns true, AST nodes under the last node will not be visited. func WalkClosures(x interface{}, f func(interface{}) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - switch x := x.(type) { - case *ArrayComprehension, *ObjectComprehension, *SetComprehension, *Every: - return f(x) - } - return false - }} - vis.Walk(x) + v1.WalkClosures(x, f) } // WalkRefs calls the function f on all references under x. If the function f // returns true, AST nodes under the last node will not be visited. func WalkRefs(x interface{}, f func(Ref) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - if r, ok := x.(Ref); ok { - return f(r) - } - return false - }} - vis.Walk(x) + v1.WalkRefs(x, f) } // WalkTerms calls the function f on all terms under x. If the function f // returns true, AST nodes under the last node will not be visited. func WalkTerms(x interface{}, f func(*Term) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - if term, ok := x.(*Term); ok { - return f(term) - } - return false - }} - vis.Walk(x) + v1.WalkTerms(x, f) } // WalkWiths calls the function f on all with modifiers under x. If the function f // returns true, AST nodes under the last node will not be visited. func WalkWiths(x interface{}, f func(*With) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - if w, ok := x.(*With); ok { - return f(w) - } - return false - }} - vis.Walk(x) + v1.WalkWiths(x, f) } // WalkExprs calls the function f on all expressions under x. If the function f // returns true, AST nodes under the last node will not be visited. func WalkExprs(x interface{}, f func(*Expr) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - if r, ok := x.(*Expr); ok { - return f(r) - } - return false - }} - vis.Walk(x) + v1.WalkExprs(x, f) } // WalkBodies calls the function f on all bodies under x. If the function f // returns true, AST nodes under the last node will not be visited. func WalkBodies(x interface{}, f func(Body) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - if b, ok := x.(Body); ok { - return f(b) - } - return false - }} - vis.Walk(x) + v1.WalkBodies(x, f) } // WalkRules calls the function f on all rules under x. If the function f // returns true, AST nodes under the last node will not be visited. func WalkRules(x interface{}, f func(*Rule) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - if r, ok := x.(*Rule); ok { - stop := f(r) - // NOTE(tsandall): since rules cannot be embedded inside of queries - // we can stop early if there is no else block. - if stop || r.Else == nil { - return true - } - } - return false - }} - vis.Walk(x) + v1.WalkRules(x, f) } // WalkNodes calls the function f on all nodes under x. If the function f // returns true, AST nodes under the last node will not be visited. func WalkNodes(x interface{}, f func(Node) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - if n, ok := x.(Node); ok { - return f(n) - } - return false - }} - vis.Walk(x) + v1.WalkNodes(x, f) } // GenericVisitor provides a utility to walk over AST nodes using a // closure. If the closure returns true, the visitor will not walk // over AST nodes under x. -type GenericVisitor struct { - f func(x interface{}) bool -} +type GenericVisitor = v1.GenericVisitor // NewGenericVisitor returns a new GenericVisitor that will invoke the function // f on AST nodes. 
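All of the Walk* helpers keep the v0 callback contract under the delegation: returning true stops descent below the visited node. A short sketch that collects every reference in a query:

func collectRefs(query string) []ast.Ref {
	var refs []ast.Ref
	ast.WalkRefs(ast.MustParseBody(query), func(r ast.Ref) bool {
		refs = append(refs, r)
		return false // keep walking into the children of r
	})
	// e.g. for "data.authz.allow[input.x]" this yields the outer ref and
	// the nested input.x.
	return refs
}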
func NewGenericVisitor(f func(x interface{}) bool) *GenericVisitor { - return &GenericVisitor{f} -} - -// Walk iterates the AST by calling the function f on the -// GenericVisitor before recursing. Contrary to the generic Walk, this -// does not require allocating the visitor from heap. -func (vis *GenericVisitor) Walk(x interface{}) { - if vis.f(x) { - return - } - - switch x := x.(type) { - case *Module: - vis.Walk(x.Package) - for i := range x.Imports { - vis.Walk(x.Imports[i]) - } - for i := range x.Rules { - vis.Walk(x.Rules[i]) - } - for i := range x.Annotations { - vis.Walk(x.Annotations[i]) - } - for i := range x.Comments { - vis.Walk(x.Comments[i]) - } - case *Package: - vis.Walk(x.Path) - case *Import: - vis.Walk(x.Path) - vis.Walk(x.Alias) - case *Rule: - vis.Walk(x.Head) - vis.Walk(x.Body) - if x.Else != nil { - vis.Walk(x.Else) - } - case *Head: - vis.Walk(x.Name) - vis.Walk(x.Args) - if x.Key != nil { - vis.Walk(x.Key) - } - if x.Value != nil { - vis.Walk(x.Value) - } - case Body: - for i := range x { - vis.Walk(x[i]) - } - case Args: - for i := range x { - vis.Walk(x[i]) - } - case *Expr: - switch ts := x.Terms.(type) { - case *Term, *SomeDecl, *Every: - vis.Walk(ts) - case []*Term: - for i := range ts { - vis.Walk(ts[i]) - } - } - for i := range x.With { - vis.Walk(x.With[i]) - } - case *With: - vis.Walk(x.Target) - vis.Walk(x.Value) - case *Term: - vis.Walk(x.Value) - case Ref: - for i := range x { - vis.Walk(x[i]) - } - case *object: - x.Foreach(func(k, _ *Term) { - vis.Walk(k) - vis.Walk(x.Get(k)) - }) - case Object: - x.Foreach(func(k, _ *Term) { - vis.Walk(k) - vis.Walk(x.Get(k)) - }) - case *Array: - x.Foreach(func(t *Term) { - vis.Walk(t) - }) - case Set: - xSlice := x.Slice() - for i := range xSlice { - vis.Walk(xSlice[i]) - } - case *ArrayComprehension: - vis.Walk(x.Term) - vis.Walk(x.Body) - case *ObjectComprehension: - vis.Walk(x.Key) - vis.Walk(x.Value) - vis.Walk(x.Body) - case *SetComprehension: - vis.Walk(x.Term) - vis.Walk(x.Body) - case Call: - for i := range x { - vis.Walk(x[i]) - } - case *Every: - if x.Key != nil { - vis.Walk(x.Key) - } - vis.Walk(x.Value) - vis.Walk(x.Domain) - vis.Walk(x.Body) - case *SomeDecl: - for i := range x.Symbols { - vis.Walk(x.Symbols[i]) - } - } + return v1.NewGenericVisitor(f) } // BeforeAfterVisitor provides a utility to walk over AST nodes using // closures. If the before closure returns true, the visitor will not // walk over AST nodes under x. The after closure is invoked always // after visiting a node. -type BeforeAfterVisitor struct { - before func(x interface{}) bool - after func(x interface{}) -} +type BeforeAfterVisitor = v1.BeforeAfterVisitor // NewBeforeAfterVisitor returns a new BeforeAndAfterVisitor that // will invoke the functions before and after AST nodes. func NewBeforeAfterVisitor(before func(x interface{}) bool, after func(x interface{})) *BeforeAfterVisitor { - return &BeforeAfterVisitor{before, after} -} - -// Walk iterates the AST by calling the functions on the -// BeforeAndAfterVisitor before and after recursing. Contrary to the -// generic Walk, this does not require allocating the visitor from -// heap. 
-func (vis *BeforeAfterVisitor) Walk(x interface{}) { - defer vis.after(x) - if vis.before(x) { - return - } - - switch x := x.(type) { - case *Module: - vis.Walk(x.Package) - for i := range x.Imports { - vis.Walk(x.Imports[i]) - } - for i := range x.Rules { - vis.Walk(x.Rules[i]) - } - for i := range x.Annotations { - vis.Walk(x.Annotations[i]) - } - for i := range x.Comments { - vis.Walk(x.Comments[i]) - } - case *Package: - vis.Walk(x.Path) - case *Import: - vis.Walk(x.Path) - vis.Walk(x.Alias) - case *Rule: - vis.Walk(x.Head) - vis.Walk(x.Body) - if x.Else != nil { - vis.Walk(x.Else) - } - case *Head: - if len(x.Reference) > 0 { - vis.Walk(x.Reference) - } else { - vis.Walk(x.Name) - if x.Key != nil { - vis.Walk(x.Key) - } - } - vis.Walk(x.Args) - if x.Value != nil { - vis.Walk(x.Value) - } - case Body: - for i := range x { - vis.Walk(x[i]) - } - case Args: - for i := range x { - vis.Walk(x[i]) - } - case *Expr: - switch ts := x.Terms.(type) { - case *Term, *SomeDecl, *Every: - vis.Walk(ts) - case []*Term: - for i := range ts { - vis.Walk(ts[i]) - } - } - for i := range x.With { - vis.Walk(x.With[i]) - } - case *With: - vis.Walk(x.Target) - vis.Walk(x.Value) - case *Term: - vis.Walk(x.Value) - case Ref: - for i := range x { - vis.Walk(x[i]) - } - case *object: - x.Foreach(func(k, _ *Term) { - vis.Walk(k) - vis.Walk(x.Get(k)) - }) - case Object: - x.Foreach(func(k, _ *Term) { - vis.Walk(k) - vis.Walk(x.Get(k)) - }) - case *Array: - x.Foreach(func(t *Term) { - vis.Walk(t) - }) - case Set: - xSlice := x.Slice() - for i := range xSlice { - vis.Walk(xSlice[i]) - } - case *ArrayComprehension: - vis.Walk(x.Term) - vis.Walk(x.Body) - case *ObjectComprehension: - vis.Walk(x.Key) - vis.Walk(x.Value) - vis.Walk(x.Body) - case *SetComprehension: - vis.Walk(x.Term) - vis.Walk(x.Body) - case Call: - for i := range x { - vis.Walk(x[i]) - } - case *Every: - if x.Key != nil { - vis.Walk(x.Key) - } - vis.Walk(x.Value) - vis.Walk(x.Domain) - vis.Walk(x.Body) - case *SomeDecl: - for i := range x.Symbols { - vis.Walk(x.Symbols[i]) - } - } + return v1.NewBeforeAfterVisitor(before, after) } // VarVisitor walks AST nodes under a given node and collects all encountered // variables. The collected variables can be controlled by specifying // VarVisitorParams when creating the visitor. -type VarVisitor struct { - params VarVisitorParams - vars VarSet -} +type VarVisitor = v1.VarVisitor // VarVisitorParams contains settings for a VarVisitor. -type VarVisitorParams struct { - SkipRefHead bool - SkipRefCallHead bool - SkipObjectKeys bool - SkipClosures bool - SkipWithTarget bool - SkipSets bool -} +type VarVisitorParams = v1.VarVisitorParams // NewVarVisitor returns a new VarVisitor object. func NewVarVisitor() *VarVisitor { - return &VarVisitor{ - vars: NewVarSet(), - } -} - -// WithParams sets the parameters in params on vis. -func (vis *VarVisitor) WithParams(params VarVisitorParams) *VarVisitor { - vis.params = params - return vis -} - -// Vars returns a VarSet that contains collected vars. 
-func (vis *VarVisitor) Vars() VarSet { - return vis.vars -} - -// visit determines if the VarVisitor will recurse into x: if it returns `true`, -// the visitor will _skip_ that branch of the AST -func (vis *VarVisitor) visit(v interface{}) bool { - if vis.params.SkipObjectKeys { - if o, ok := v.(Object); ok { - o.Foreach(func(_, v *Term) { - vis.Walk(v) - }) - return true - } - } - if vis.params.SkipRefHead { - if r, ok := v.(Ref); ok { - rSlice := r[1:] - for i := range rSlice { - vis.Walk(rSlice[i]) - } - return true - } - } - if vis.params.SkipClosures { - switch v := v.(type) { - case *ArrayComprehension, *ObjectComprehension, *SetComprehension: - return true - case *Expr: - if ev, ok := v.Terms.(*Every); ok { - vis.Walk(ev.Domain) - // We're _not_ walking ev.Body -- that's the closure here - return true - } - } - } - if vis.params.SkipWithTarget { - if v, ok := v.(*With); ok { - vis.Walk(v.Value) - return true - } - } - if vis.params.SkipSets { - if _, ok := v.(Set); ok { - return true - } - } - if vis.params.SkipRefCallHead { - switch v := v.(type) { - case *Expr: - if terms, ok := v.Terms.([]*Term); ok { - termSlice := terms[0].Value.(Ref)[1:] - for i := range termSlice { - vis.Walk(termSlice[i]) - } - for i := 1; i < len(terms); i++ { - vis.Walk(terms[i]) - } - for i := range v.With { - vis.Walk(v.With[i]) - } - return true - } - case Call: - operator := v[0].Value.(Ref) - for i := 1; i < len(operator); i++ { - vis.Walk(operator[i]) - } - for i := 1; i < len(v); i++ { - vis.Walk(v[i]) - } - return true - case *With: - if ref, ok := v.Target.Value.(Ref); ok { - refSlice := ref[1:] - for i := range refSlice { - vis.Walk(refSlice[i]) - } - } - if ref, ok := v.Value.Value.(Ref); ok { - refSlice := ref[1:] - for i := range refSlice { - vis.Walk(refSlice[i]) - } - } else { - vis.Walk(v.Value) - } - return true - } - } - if v, ok := v.(Var); ok { - vis.vars.Add(v) - } - return false -} - -// Walk iterates the AST by calling the function f on the -// GenericVisitor before recursing. Contrary to the generic Walk, this -// does not require allocating the visitor from heap. 
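VarVisitor retains its builder-style API under the alias. Sketch: collect the variables of a body while skipping operator heads and object keys:

func bodyVars(query string) ast.VarSet {
	vis := ast.NewVarVisitor().WithParams(ast.VarVisitorParams{
		SkipRefHead:    true, // don't collect operator heads such as "eq"
		SkipObjectKeys: true,
	})
	vis.Walk(ast.MustParseBody(query))
	return vis.Vars() // for `x = {"k": y}` this yields {x, y}
}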
-func (vis *VarVisitor) Walk(x interface{}) { - if vis.visit(x) { - return - } - - switch x := x.(type) { - case *Module: - vis.Walk(x.Package) - for i := range x.Imports { - vis.Walk(x.Imports[i]) - } - for i := range x.Rules { - vis.Walk(x.Rules[i]) - } - for i := range x.Comments { - vis.Walk(x.Comments[i]) - } - case *Package: - vis.Walk(x.Path) - case *Import: - vis.Walk(x.Path) - vis.Walk(x.Alias) - case *Rule: - vis.Walk(x.Head) - vis.Walk(x.Body) - if x.Else != nil { - vis.Walk(x.Else) - } - case *Head: - if len(x.Reference) > 0 { - vis.Walk(x.Reference) - } else { - vis.Walk(x.Name) - if x.Key != nil { - vis.Walk(x.Key) - } - } - vis.Walk(x.Args) - - if x.Value != nil { - vis.Walk(x.Value) - } - case Body: - for i := range x { - vis.Walk(x[i]) - } - case Args: - for i := range x { - vis.Walk(x[i]) - } - case *Expr: - switch ts := x.Terms.(type) { - case *Term, *SomeDecl, *Every: - vis.Walk(ts) - case []*Term: - for i := range ts { - vis.Walk(ts[i]) - } - } - for i := range x.With { - vis.Walk(x.With[i]) - } - case *With: - vis.Walk(x.Target) - vis.Walk(x.Value) - case *Term: - vis.Walk(x.Value) - case Ref: - for i := range x { - vis.Walk(x[i]) - } - case *object: - x.Foreach(func(k, _ *Term) { - vis.Walk(k) - vis.Walk(x.Get(k)) - }) - case *Array: - x.Foreach(func(t *Term) { - vis.Walk(t) - }) - case Set: - xSlice := x.Slice() - for i := range xSlice { - vis.Walk(xSlice[i]) - } - case *ArrayComprehension: - vis.Walk(x.Term) - vis.Walk(x.Body) - case *ObjectComprehension: - vis.Walk(x.Key) - vis.Walk(x.Value) - vis.Walk(x.Body) - case *SetComprehension: - vis.Walk(x.Term) - vis.Walk(x.Body) - case Call: - for i := range x { - vis.Walk(x[i]) - } - case *Every: - if x.Key != nil { - vis.Walk(x.Key) - } - vis.Walk(x.Value) - vis.Walk(x.Domain) - vis.Walk(x.Body) - case *SomeDecl: - for i := range x.Symbols { - vis.Walk(x.Symbols[i]) - } - } + return v1.NewVarVisitor() } diff --git a/vendor/github.com/open-policy-agent/opa/bundle/bundle.go b/vendor/github.com/open-policy-agent/opa/bundle/bundle.go index 0e159384ef1..50ad97349a0 100644 --- a/vendor/github.com/open-policy-agent/opa/bundle/bundle.go +++ b/vendor/github.com/open-policy-agent/opa/bundle/bundle.go @@ -6,1386 +6,97 @@ package bundle import ( - "archive/tar" - "bytes" - "compress/gzip" - "encoding/hex" - "encoding/json" - "errors" - "fmt" "io" - "net/url" - "os" - "path" - "path/filepath" - "reflect" - "strings" - "github.com/gobwas/glob" "github.com/open-policy-agent/opa/ast" - astJSON "github.com/open-policy-agent/opa/ast/json" - "github.com/open-policy-agent/opa/format" - "github.com/open-policy-agent/opa/internal/file/archive" - "github.com/open-policy-agent/opa/internal/merge" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/bundle" ) // Common file extensions and file names. 
const ( - RegoExt = ".rego" - WasmFile = "policy.wasm" - PlanFile = "plan.json" - ManifestExt = ".manifest" - SignaturesFile = "signatures.json" - patchFile = "patch.json" - dataFile = "data.json" - yamlDataFile = "data.yaml" - ymlDataFile = "data.yml" - defaultHashingAlg = "SHA-256" - DefaultSizeLimitBytes = (1024 * 1024 * 1024) // limit bundle reads to 1GB to protect against gzip bombs - DeltaBundleType = "delta" - SnapshotBundleType = "snapshot" + RegoExt = v1.RegoExt + WasmFile = v1.WasmFile + PlanFile = v1.PlanFile + ManifestExt = v1.ManifestExt + SignaturesFile = v1.SignaturesFile + + DefaultSizeLimitBytes = v1.DefaultSizeLimitBytes + DeltaBundleType = v1.DeltaBundleType + SnapshotBundleType = v1.SnapshotBundleType ) // Bundle represents a loaded bundle. The bundle can contain data and policies. -type Bundle struct { - Signatures SignaturesConfig - Manifest Manifest - Data map[string]interface{} - Modules []ModuleFile - Wasm []byte // Deprecated. Use WasmModules instead - WasmModules []WasmModuleFile - PlanModules []PlanModuleFile - Patch Patch - Etag string - Raw []Raw - - lazyLoadingMode bool - sizeLimitBytes int64 -} +type Bundle = v1.Bundle // Raw contains raw bytes representing the bundle's content -type Raw struct { - Path string - Value []byte -} +type Raw = v1.Raw // Patch contains an array of objects wherein each object represents the patch operation to be // applied to the bundle data. -type Patch struct { - Data []PatchOperation `json:"data,omitempty"` -} +type Patch = v1.Patch // PatchOperation models a single patch operation against a document. -type PatchOperation struct { - Op string `json:"op"` - Path string `json:"path"` - Value interface{} `json:"value"` -} +type PatchOperation = v1.PatchOperation // SignaturesConfig represents an array of JWTs that encapsulate the signatures for the bundle. -type SignaturesConfig struct { - Signatures []string `json:"signatures,omitempty"` - Plugin string `json:"plugin,omitempty"` -} - -// isEmpty returns if the SignaturesConfig is empty. -func (s SignaturesConfig) isEmpty() bool { - return reflect.DeepEqual(s, SignaturesConfig{}) -} +type SignaturesConfig = v1.SignaturesConfig // DecodedSignature represents the decoded JWT payload. -type DecodedSignature struct { - Files []FileInfo `json:"files"` - KeyID string `json:"keyid"` // Deprecated, use kid in the JWT header instead. - Scope string `json:"scope"` - IssuedAt int64 `json:"iat"` - Issuer string `json:"iss"` -} +type DecodedSignature = v1.DecodedSignature // FileInfo contains the hashing algorithm used, resulting digest etc. -type FileInfo struct { - Name string `json:"name"` - Hash string `json:"hash"` - Algorithm string `json:"algorithm"` -} +type FileInfo = v1.FileInfo // NewFile returns a new FileInfo. func NewFile(name, hash, alg string) FileInfo { - return FileInfo{ - Name: name, - Hash: hash, - Algorithm: alg, - } + return v1.NewFile(name, hash, alg) } // Manifest represents the manifest from a bundle. The manifest may contain // metadata such as the bundle revision. -type Manifest struct { - Revision string `json:"revision"` - Roots *[]string `json:"roots,omitempty"` - WasmResolvers []WasmResolver `json:"wasm,omitempty"` - // RegoVersion is the global Rego version for the bundle described by this Manifest. - // The Rego version of individual files can be overridden in FileRegoVersions. - // We don't use ast.RegoVersion here, as this iota type's order isn't guaranteed to be stable over time. 
- // We use a pointer so that we can support hand-made bundles that don't have an explicit version appropriately. - // E.g. in OPA 0.x if --v1-compatible is used when consuming the bundle, and there is no specified version, - // we should default to v1; if --v1-compatible isn't used, we should default to v0. In OPA 1.0, no --x-compatible - // flag and no explicit bundle version should default to v1. - RegoVersion *int `json:"rego_version,omitempty"` - // FileRegoVersions is a map from file paths to Rego versions. - // This allows individual files to override the global Rego version specified by RegoVersion. - FileRegoVersions map[string]int `json:"file_rego_versions,omitempty"` - Metadata map[string]interface{} `json:"metadata,omitempty"` - - compiledFileRegoVersions []fileRegoVersion -} - -type fileRegoVersion struct { - path glob.Glob - version int -} +type Manifest = v1.Manifest // WasmResolver maps a wasm module to an entrypoint ref. -type WasmResolver struct { - Entrypoint string `json:"entrypoint,omitempty"` - Module string `json:"module,omitempty"` - Annotations []*ast.Annotations `json:"annotations,omitempty"` -} - -// Init initializes the manifest. If you instantiate a manifest -// manually, call Init to ensure that the roots are set properly. -func (m *Manifest) Init() { - if m.Roots == nil { - defaultRoots := []string{""} - m.Roots = &defaultRoots - } -} - -// AddRoot adds r to the roots of m. This function is idempotent. -func (m *Manifest) AddRoot(r string) { - m.Init() - if !RootPathsContain(*m.Roots, r) { - *m.Roots = append(*m.Roots, r) - } -} - -func (m *Manifest) SetRegoVersion(v ast.RegoVersion) { - m.Init() - regoVersion := 0 - if v == ast.RegoV1 { - regoVersion = 1 - } - m.RegoVersion = ®oVersion -} - -// Equal returns true if m is semantically equivalent to other. -func (m Manifest) Equal(other Manifest) bool { - - // This is safe since both are passed by value. - m.Init() - other.Init() - - if m.Revision != other.Revision { - return false - } - - if m.RegoVersion == nil && other.RegoVersion != nil { - return false - } - if m.RegoVersion != nil && other.RegoVersion == nil { - return false - } - if m.RegoVersion != nil && other.RegoVersion != nil && *m.RegoVersion != *other.RegoVersion { - return false - } - - // If both are nil, or both are empty, we consider them equal. - if !(len(m.FileRegoVersions) == 0 && len(other.FileRegoVersions) == 0) && - !reflect.DeepEqual(m.FileRegoVersions, other.FileRegoVersions) { - return false - } - - if !reflect.DeepEqual(m.Metadata, other.Metadata) { - return false - } - - return m.equalWasmResolversAndRoots(other) -} - -func (m Manifest) Empty() bool { - return m.Equal(Manifest{}) -} - -// Copy returns a deep copy of the manifest. 
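Because Manifest is now an alias, hand-constructed manifests keep the same initialization contract. A sketch, assuming the bundle and ast packages are imported:

func newManifest() bundle.Manifest {
	var m bundle.Manifest
	m.Init()                     // defaults Roots to [""]
	m.AddRoot("authz")           // idempotent
	m.SetRegoVersion(ast.RegoV1) // serialized as rego_version in the manifest
	return m
}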
-func (m Manifest) Copy() Manifest { - m.Init() - roots := make([]string, len(*m.Roots)) - copy(roots, *m.Roots) - m.Roots = &roots - - wasmModules := make([]WasmResolver, len(m.WasmResolvers)) - copy(wasmModules, m.WasmResolvers) - m.WasmResolvers = wasmModules - - metadata := m.Metadata - - if metadata != nil { - m.Metadata = make(map[string]interface{}) - for k, v := range metadata { - m.Metadata[k] = v - } - } - - return m -} - -func (m Manifest) String() string { - m.Init() - if m.RegoVersion != nil { - return fmt.Sprintf("", - m.Revision, *m.RegoVersion, *m.Roots, m.WasmResolvers, m.Metadata) - } - return fmt.Sprintf("", - m.Revision, *m.Roots, m.WasmResolvers, m.Metadata) -} - -func (m Manifest) rootSet() stringSet { - rs := map[string]struct{}{} - - for _, r := range *m.Roots { - rs[r] = struct{}{} - } - - return stringSet(rs) -} - -func (m Manifest) equalWasmResolversAndRoots(other Manifest) bool { - if len(m.WasmResolvers) != len(other.WasmResolvers) { - return false - } - - for i := 0; i < len(m.WasmResolvers); i++ { - if !m.WasmResolvers[i].Equal(&other.WasmResolvers[i]) { - return false - } - } - - return m.rootSet().Equal(other.rootSet()) -} - -func (wr *WasmResolver) Equal(other *WasmResolver) bool { - if wr == nil && other == nil { - return true - } - - if wr == nil || other == nil { - return false - } - - if wr.Module != other.Module { - return false - } - - if wr.Entrypoint != other.Entrypoint { - return false - } - - annotLen := len(wr.Annotations) - if annotLen != len(other.Annotations) { - return false - } - - for i := 0; i < annotLen; i++ { - if wr.Annotations[i].Compare(other.Annotations[i]) != 0 { - return false - } - } - - return true -} - -type stringSet map[string]struct{} - -func (ss stringSet) Equal(other stringSet) bool { - if len(ss) != len(other) { - return false - } - for k := range other { - if _, ok := ss[k]; !ok { - return false - } - } - return true -} - -func (m *Manifest) validateAndInjectDefaults(b Bundle) error { - - m.Init() - - // Validate roots in bundle. - roots := *m.Roots - - // Standardize the roots (no starting or trailing slash) - for i := range roots { - roots[i] = strings.Trim(roots[i], "/") - } - - for i := 0; i < len(roots)-1; i++ { - for j := i + 1; j < len(roots); j++ { - if RootPathsOverlap(roots[i], roots[j]) { - return fmt.Errorf("manifest has overlapped roots: '%v' and '%v'", roots[i], roots[j]) - } - } - } - - // Validate modules in bundle. 
- for _, module := range b.Modules { - found := false - if path, err := module.Parsed.Package.Path.Ptr(); err == nil { - found = RootPathsContain(roots, path) - } - if !found { - return fmt.Errorf("manifest roots %v do not permit '%v' in module '%v'", roots, module.Parsed.Package, module.Path) - } - } - - // Build a set of wasm module entrypoints to validate - wasmModuleToEps := map[string]string{} - seenEps := map[string]struct{}{} - for _, wm := range b.WasmModules { - wasmModuleToEps[wm.Path] = "" - } - - for _, wmConfig := range b.Manifest.WasmResolvers { - _, ok := wasmModuleToEps[wmConfig.Module] - if !ok { - return fmt.Errorf("manifest references wasm module '%s' but the module file does not exist", wmConfig.Module) - } - - // Ensure wasm module entrypoint in within bundle roots - if !RootPathsContain(roots, wmConfig.Entrypoint) { - return fmt.Errorf("manifest roots %v do not permit '%v' entrypoint for wasm module '%v'", roots, wmConfig.Entrypoint, wmConfig.Module) - } - - if _, ok := seenEps[wmConfig.Entrypoint]; ok { - return fmt.Errorf("entrypoint '%s' cannot be used by more than one wasm module", wmConfig.Entrypoint) - } - seenEps[wmConfig.Entrypoint] = struct{}{} - - wasmModuleToEps[wmConfig.Module] = wmConfig.Entrypoint - } - - // Validate data patches in bundle. - for _, patch := range b.Patch.Data { - path := strings.Trim(patch.Path, "/") - if !RootPathsContain(roots, path) { - return fmt.Errorf("manifest roots %v do not permit data patch at path '%s'", roots, path) - } - } - - if b.lazyLoadingMode { - return nil - } - - // Validate data in bundle. - return dfs(b.Data, "", func(path string, node interface{}) (bool, error) { - path = strings.Trim(path, "/") - if RootPathsContain(roots, path) { - return true, nil - } - - if _, ok := node.(map[string]interface{}); ok { - for i := range roots { - if RootPathsContain(strings.Split(path, "/"), roots[i]) { - return false, nil - } - } - } - return false, fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, path) - }) -} +type WasmResolver = v1.WasmResolver // ModuleFile represents a single module contained in a bundle. -type ModuleFile struct { - URL string - Path string - RelativePath string - Raw []byte - Parsed *ast.Module -} +type ModuleFile = v1.ModuleFile // WasmModuleFile represents a single wasm module contained in a bundle. -type WasmModuleFile struct { - URL string - Path string - Entrypoints []ast.Ref - Raw []byte -} +type WasmModuleFile = v1.WasmModuleFile // PlanModuleFile represents a single plan module contained in a bundle. // // NOTE(tsandall): currently the plans are just opaque binary blobs. In the // future we could inject the entrypoints so that the plans could be executed // inside of OPA proper like we do for Wasm modules. -type PlanModuleFile struct { - URL string - Path string - Raw []byte -} +type PlanModuleFile = v1.PlanModuleFile // Reader contains the reader to load the bundle from. -type Reader struct { - loader DirectoryLoader - includeManifestInData bool - metrics metrics.Metrics - baseDir string - verificationConfig *VerificationConfig - skipVerify bool - processAnnotations bool - jsonOptions *astJSON.Options - capabilities *ast.Capabilities - files map[string]FileInfo // files in the bundle signature payload - sizeLimitBytes int64 - etag string - lazyLoadingMode bool - name string - persist bool - regoVersion ast.RegoVersion - followSymlinks bool -} +type Reader = v1.Reader // NewReader is deprecated. Use NewCustomReader instead. 
func NewReader(r io.Reader) *Reader { - return NewCustomReader(NewTarballLoader(r)) + return v1.NewReader(r).WithRegoVersion(ast.DefaultRegoVersion) } // NewCustomReader returns a new Reader configured to use the // specified DirectoryLoader. func NewCustomReader(loader DirectoryLoader) *Reader { - nr := Reader{ - loader: loader, - metrics: metrics.New(), - files: make(map[string]FileInfo), - sizeLimitBytes: DefaultSizeLimitBytes + 1, - } - return &nr -} - -// IncludeManifestInData sets whether the manifest metadata should be -// included in the bundle's data. -func (r *Reader) IncludeManifestInData(includeManifestInData bool) *Reader { - r.includeManifestInData = includeManifestInData - return r -} - -// WithMetrics sets the metrics object to be used while loading bundles -func (r *Reader) WithMetrics(m metrics.Metrics) *Reader { - r.metrics = m - return r -} - -// WithBaseDir sets a base directory for file paths of loaded Rego -// modules. This will *NOT* affect the loaded path of data files. -func (r *Reader) WithBaseDir(dir string) *Reader { - r.baseDir = dir - return r -} - -// WithBundleVerificationConfig sets the key configuration used to verify a signed bundle -func (r *Reader) WithBundleVerificationConfig(config *VerificationConfig) *Reader { - r.verificationConfig = config - return r -} - -// WithSkipBundleVerification skips verification of a signed bundle -func (r *Reader) WithSkipBundleVerification(skipVerify bool) *Reader { - r.skipVerify = skipVerify - return r -} - -// WithProcessAnnotations enables annotation processing during .rego file parsing. -func (r *Reader) WithProcessAnnotations(yes bool) *Reader { - r.processAnnotations = yes - return r -} - -// WithCapabilities sets the supported capabilities when loading the files -func (r *Reader) WithCapabilities(caps *ast.Capabilities) *Reader { - r.capabilities = caps - return r -} - -// WithJSONOptions sets the JSONOptions to use when parsing policy files -func (r *Reader) WithJSONOptions(opts *astJSON.Options) *Reader { - r.jsonOptions = opts - return r -} - -// WithSizeLimitBytes sets the size limit to apply to files in the bundle. If files are larger -// than this, an error will be returned by the reader. -func (r *Reader) WithSizeLimitBytes(n int64) *Reader { - r.sizeLimitBytes = n + 1 - return r -} - -// WithBundleEtag sets the given etag value on the bundle -func (r *Reader) WithBundleEtag(etag string) *Reader { - r.etag = etag - return r -} - -// WithBundleName specifies the bundle name -func (r *Reader) WithBundleName(name string) *Reader { - r.name = name - return r -} - -func (r *Reader) WithFollowSymlinks(yes bool) *Reader { - r.followSymlinks = yes - return r -} - -// WithLazyLoadingMode sets the bundle loading mode. If true, -// bundles will be read in lazy mode. In this mode, data files in the bundle will not be -// deserialized and the check to validate that the bundle data does not contain paths -// outside the bundle's roots will not be performed while reading the bundle. -func (r *Reader) WithLazyLoadingMode(yes bool) *Reader { - r.lazyLoadingMode = yes - return r -} - -// WithBundlePersistence specifies if the downloaded bundle will eventually be persisted to disk. 
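Reading a bundle through the aliased Reader is unchanged apart from the pinned default Rego version. A hedged sketch (the path argument is a hypothetical gzipped bundle tarball, e.g. from opa build; assumes imports of os and the bundle package):

func readBundle(path string) (bundle.Bundle, error) {
	f, err := os.Open(path)
	if err != nil {
		return bundle.Bundle{}, err
	}
	defer f.Close()
	// NewReader forwards to v1.NewReader, which reads the gzipped tarball
	// and applies the v0 default Rego version.
	return bundle.NewReader(f).WithProcessAnnotations(true).Read()
}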
-func (r *Reader) WithBundlePersistence(persist bool) *Reader { - r.persist = persist - return r -} - -func (r *Reader) WithRegoVersion(version ast.RegoVersion) *Reader { - r.regoVersion = version - return r -} - -func (r *Reader) ParserOptions() ast.ParserOptions { - return ast.ParserOptions{ - ProcessAnnotation: r.processAnnotations, - Capabilities: r.capabilities, - JSONOptions: r.jsonOptions, - RegoVersion: r.regoVersion, - } -} - -// Read returns a new Bundle loaded from the reader. -func (r *Reader) Read() (Bundle, error) { - - var bundle Bundle - var descriptors []*Descriptor - var err error - var raw []Raw - - bundle.Signatures, bundle.Patch, descriptors, err = preProcessBundle(r.loader, r.skipVerify, r.sizeLimitBytes) - if err != nil { - return bundle, err - } - - bundle.lazyLoadingMode = r.lazyLoadingMode - bundle.sizeLimitBytes = r.sizeLimitBytes - - if bundle.Type() == SnapshotBundleType { - err = r.checkSignaturesAndDescriptors(bundle.Signatures) - if err != nil { - return bundle, err - } - - bundle.Data = map[string]interface{}{} - } - - var modules []ModuleFile - for _, f := range descriptors { - buf, err := readFile(f, r.sizeLimitBytes) - if err != nil { - return bundle, err - } - - // verify the file content - if bundle.Type() == SnapshotBundleType && !bundle.Signatures.isEmpty() { - path := f.Path() - if r.baseDir != "" { - path = f.URL() - } - path = strings.TrimPrefix(path, "/") - - // check if the file is to be excluded from bundle verification - if r.isFileExcluded(path) { - delete(r.files, path) - } else { - if err = r.verifyBundleFile(path, buf); err != nil { - return bundle, err - } - } - } - - // Normalize the paths to use `/` separators - path := filepath.ToSlash(f.Path()) - - if strings.HasSuffix(path, RegoExt) { - fullPath := r.fullPath(path) - bs := buf.Bytes() - - if r.lazyLoadingMode { - p := fullPath - if r.name != "" { - p = modulePathWithPrefix(r.name, fullPath) - } - - raw = append(raw, Raw{Path: p, Value: bs}) - } - - // Modules are parsed after we've had a chance to read the manifest - mf := ModuleFile{ - URL: f.URL(), - Path: fullPath, - RelativePath: path, - Raw: bs, - } - modules = append(modules, mf) - } else if filepath.Base(path) == WasmFile { - bundle.WasmModules = append(bundle.WasmModules, WasmModuleFile{ - URL: f.URL(), - Path: r.fullPath(path), - Raw: buf.Bytes(), - }) - } else if filepath.Base(path) == PlanFile { - bundle.PlanModules = append(bundle.PlanModules, PlanModuleFile{ - URL: f.URL(), - Path: r.fullPath(path), - Raw: buf.Bytes(), - }) - } else if filepath.Base(path) == dataFile { - if r.lazyLoadingMode { - raw = append(raw, Raw{Path: path, Value: buf.Bytes()}) - continue - } - - var value interface{} - - r.metrics.Timer(metrics.RegoDataParse).Start() - err := util.UnmarshalJSON(buf.Bytes(), &value) - r.metrics.Timer(metrics.RegoDataParse).Stop() - - if err != nil { - return bundle, fmt.Errorf("bundle load failed on %v: %w", r.fullPath(path), err) - } - - if err := insertValue(&bundle, path, value); err != nil { - return bundle, err - } - - } else if filepath.Base(path) == yamlDataFile || filepath.Base(path) == ymlDataFile { - if r.lazyLoadingMode { - raw = append(raw, Raw{Path: path, Value: buf.Bytes()}) - continue - } - - var value interface{} - - r.metrics.Timer(metrics.RegoDataParse).Start() - err := util.Unmarshal(buf.Bytes(), &value) - r.metrics.Timer(metrics.RegoDataParse).Stop() - - if err != nil { - return bundle, fmt.Errorf("bundle load failed on %v: %w", r.fullPath(path), err) - } - - if err := insertValue(&bundle, path, 
value); err != nil { - return bundle, err - } - - } else if strings.HasSuffix(path, ManifestExt) { - if err := util.NewJSONDecoder(&buf).Decode(&bundle.Manifest); err != nil { - return bundle, fmt.Errorf("bundle load failed on manifest decode: %w", err) - } - } - } - - // Parse modules - popts := r.ParserOptions() - popts.RegoVersion = bundle.RegoVersion(popts.RegoVersion) - for _, mf := range modules { - modulePopts := popts - if modulePopts.RegoVersion, err = bundle.RegoVersionForFile(mf.RelativePath, popts.RegoVersion); err != nil { - return bundle, err - } - r.metrics.Timer(metrics.RegoModuleParse).Start() - mf.Parsed, err = ast.ParseModuleWithOpts(mf.Path, string(mf.Raw), modulePopts) - r.metrics.Timer(metrics.RegoModuleParse).Stop() - if err != nil { - return bundle, err - } - bundle.Modules = append(bundle.Modules, mf) - } - - if bundle.Type() == DeltaBundleType { - if len(bundle.Data) != 0 { - return bundle, fmt.Errorf("delta bundle expected to contain only patch file but data files found") - } - - if len(bundle.Modules) != 0 { - return bundle, fmt.Errorf("delta bundle expected to contain only patch file but policy files found") - } - - if len(bundle.WasmModules) != 0 { - return bundle, fmt.Errorf("delta bundle expected to contain only patch file but wasm files found") - } - - if r.persist { - return bundle, fmt.Errorf("'persist' property is true in config. persisting delta bundle to disk is not supported") - } - } - - // check if the bundle signatures specify any files that weren't found in the bundle - if bundle.Type() == SnapshotBundleType && len(r.files) != 0 { - extra := []string{} - for k := range r.files { - extra = append(extra, k) - } - return bundle, fmt.Errorf("file(s) %v specified in bundle signatures but not found in the target bundle", extra) - } - - if err := bundle.Manifest.validateAndInjectDefaults(bundle); err != nil { - return bundle, err - } - - // Inject the wasm module entrypoint refs into the WasmModuleFile structs - epMap := map[string][]string{} - for _, r := range bundle.Manifest.WasmResolvers { - epMap[r.Module] = append(epMap[r.Module], r.Entrypoint) - } - for i := 0; i < len(bundle.WasmModules); i++ { - entrypoints := epMap[bundle.WasmModules[i].Path] - for _, entrypoint := range entrypoints { - ref, err := ast.PtrRef(ast.DefaultRootDocument, entrypoint) - if err != nil { - return bundle, fmt.Errorf("failed to parse wasm module entrypoint '%s': %s", entrypoint, err) - } - bundle.WasmModules[i].Entrypoints = append(bundle.WasmModules[i].Entrypoints, ref) - } - } - - if r.includeManifestInData { - var metadata map[string]interface{} - - b, err := json.Marshal(&bundle.Manifest) - if err != nil { - return bundle, fmt.Errorf("bundle load failed on manifest marshal: %w", err) - } - - err = util.UnmarshalJSON(b, &metadata) - if err != nil { - return bundle, fmt.Errorf("bundle load failed on manifest unmarshal: %w", err) - } - - // For backwards compatibility always write to the old unnamed manifest path - // This will *not* be correct if >1 bundle is in use... 
- if err := bundle.insertData(legacyManifestStoragePath, metadata); err != nil { - return bundle, fmt.Errorf("bundle load failed on %v: %w", legacyRevisionStoragePath, err) - } - } - - bundle.Etag = r.etag - bundle.Raw = raw - - return bundle, nil -} - -func (r *Reader) isFileExcluded(path string) bool { - for _, e := range r.verificationConfig.Exclude { - match, _ := filepath.Match(e, path) - if match { - return true - } - } - return false -} - -func (r *Reader) checkSignaturesAndDescriptors(signatures SignaturesConfig) error { - if r.skipVerify { - return nil - } - - if signatures.isEmpty() && r.verificationConfig != nil && r.verificationConfig.KeyID != "" { - return fmt.Errorf("bundle missing .signatures.json file") - } - - if !signatures.isEmpty() { - if r.verificationConfig == nil { - return fmt.Errorf("verification key not provided") - } - - // verify the JWT signatures included in the `.signatures.json` file - if err := r.verifyBundleSignature(signatures); err != nil { - return err - } - } - return nil -} - -func (r *Reader) verifyBundleSignature(sc SignaturesConfig) error { - var err error - r.files, err = VerifyBundleSignature(sc, r.verificationConfig) - return err -} - -func (r *Reader) verifyBundleFile(path string, data bytes.Buffer) error { - return VerifyBundleFile(path, data, r.files) -} - -func (r *Reader) fullPath(path string) string { - if r.baseDir != "" { - path = filepath.Join(r.baseDir, path) - } - return path + return v1.NewCustomReader(loader).WithRegoVersion(ast.DefaultRegoVersion) } // Write is deprecated. Use NewWriter instead. func Write(w io.Writer, bundle Bundle) error { - return NewWriter(w). - UseModulePath(true). - DisableFormat(true). - Write(bundle) + return v1.Write(w, bundle) } // Writer implements bundle serialization. -type Writer struct { - usePath bool - disableFormat bool - w io.Writer -} +type Writer = v1.Writer // NewWriter returns a bundle writer that writes to w. func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - } -} - -// UseModulePath configures the writer to use the module file path instead of the -// module file URL during serialization. This is for backwards compatibility. -func (w *Writer) UseModulePath(yes bool) *Writer { - w.usePath = yes - return w -} - -// DisableFormat configures the writer to just write out raw bytes instead -// of formatting modules before serialization. -func (w *Writer) DisableFormat(yes bool) *Writer { - w.disableFormat = yes - return w -} - -// Write writes the bundle to the writer's output stream. 
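For callers, the serialization entry points keep their shape; only the implementation moves into v1. A hedged sketch of round-tripping a bundle through the deprecated v0 helpers, assuming b is a populated Bundle and the snippet runs inside a function returning error:

	var buf bytes.Buffer
	if err := bundle.Write(&buf, b); err != nil { // gzipped tarball out
		return err
	}
	b2, err := bundle.NewReader(&buf).Read() // and back in
	if err != nil {
		return err
	}
	_ = b2

The removed v0 implementation below is what v1.Write now performs internally: data.json first, then module, wasm, signature and plan files, with the manifest written last: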
-func (w *Writer) Write(bundle Bundle) error { - gw := gzip.NewWriter(w.w) - tw := tar.NewWriter(gw) - - bundleType := bundle.Type() - - if bundleType == SnapshotBundleType { - var buf bytes.Buffer - - if err := json.NewEncoder(&buf).Encode(bundle.Data); err != nil { - return err - } - - if err := archive.WriteFile(tw, "data.json", buf.Bytes()); err != nil { - return err - } - - for _, module := range bundle.Modules { - path := module.URL - if w.usePath { - path = module.Path - } - - if err := archive.WriteFile(tw, path, module.Raw); err != nil { - return err - } - } - - if err := w.writeWasm(tw, bundle); err != nil { - return err - } - - if err := writeSignatures(tw, bundle); err != nil { - return err - } - - if err := w.writePlan(tw, bundle); err != nil { - return err - } - } else if bundleType == DeltaBundleType { - if err := writePatch(tw, bundle); err != nil { - return err - } - } - - if err := writeManifest(tw, bundle); err != nil { - return err - } - - if err := tw.Close(); err != nil { - return err - } - - return gw.Close() -} - -func (w *Writer) writeWasm(tw *tar.Writer, bundle Bundle) error { - for _, wm := range bundle.WasmModules { - path := wm.URL - if w.usePath { - path = wm.Path - } - - err := archive.WriteFile(tw, path, wm.Raw) - if err != nil { - return err - } - } - - if len(bundle.Wasm) > 0 { - err := archive.WriteFile(tw, "/"+WasmFile, bundle.Wasm) - if err != nil { - return err - } - } - - return nil -} - -func (w *Writer) writePlan(tw *tar.Writer, bundle Bundle) error { - for _, wm := range bundle.PlanModules { - path := wm.URL - if w.usePath { - path = wm.Path - } - - err := archive.WriteFile(tw, path, wm.Raw) - if err != nil { - return err - } - } - - return nil -} - -func writeManifest(tw *tar.Writer, bundle Bundle) error { - - if bundle.Manifest.Empty() { - return nil - } - - var buf bytes.Buffer - - if err := json.NewEncoder(&buf).Encode(bundle.Manifest); err != nil { - return err - } - - return archive.WriteFile(tw, ManifestExt, buf.Bytes()) -} - -func writePatch(tw *tar.Writer, bundle Bundle) error { - - var buf bytes.Buffer - - if err := json.NewEncoder(&buf).Encode(bundle.Patch); err != nil { - return err - } - - return archive.WriteFile(tw, patchFile, buf.Bytes()) -} - -func writeSignatures(tw *tar.Writer, bundle Bundle) error { - - if bundle.Signatures.isEmpty() { - return nil - } - - bs, err := json.MarshalIndent(bundle.Signatures, "", " ") - if err != nil { - return err - } - - return archive.WriteFile(tw, fmt.Sprintf(".%v", SignaturesFile), bs) -} - -func hashBundleFiles(hash SignatureHasher, b *Bundle) ([]FileInfo, error) { - - files := []FileInfo{} - - bs, err := hash.HashFile(b.Data) - if err != nil { - return files, err - } - files = append(files, NewFile(strings.TrimPrefix("data.json", "/"), hex.EncodeToString(bs), defaultHashingAlg)) - - if len(b.Wasm) != 0 { - bs, err := hash.HashFile(b.Wasm) - if err != nil { - return files, err - } - files = append(files, NewFile(strings.TrimPrefix(WasmFile, "/"), hex.EncodeToString(bs), defaultHashingAlg)) - } - - for _, wasmModule := range b.WasmModules { - bs, err := hash.HashFile(wasmModule.Raw) - if err != nil { - return files, err - } - files = append(files, NewFile(strings.TrimPrefix(wasmModule.Path, "/"), hex.EncodeToString(bs), defaultHashingAlg)) - } - - for _, planmodule := range b.PlanModules { - bs, err := hash.HashFile(planmodule.Raw) - if err != nil { - return files, err - } - files = append(files, NewFile(strings.TrimPrefix(planmodule.Path, "/"), hex.EncodeToString(bs), defaultHashingAlg)) - } - 
- // If the manifest is essentially empty, don't add it to the signatures since it - // won't be written to the bundle. Otherwise: - // parse the manifest into a JSON structure; - // then recursively order the fields of all objects alphabetically and then apply - // the hash function to result to compute the hash. - if !b.Manifest.Empty() { - mbs, err := json.Marshal(b.Manifest) - if err != nil { - return files, err - } - - var result map[string]interface{} - if err := util.Unmarshal(mbs, &result); err != nil { - return files, err - } - - bs, err = hash.HashFile(result) - if err != nil { - return files, err - } - - files = append(files, NewFile(strings.TrimPrefix(ManifestExt, "/"), hex.EncodeToString(bs), defaultHashingAlg)) - } - - return files, err -} - -// FormatModules formats Rego modules -// Modules will be formatted to comply with rego-v0, but Rego compatibility of individual parsed modules will be respected (e.g. if 'rego.v1' is imported). -func (b *Bundle) FormatModules(useModulePath bool) error { - return b.FormatModulesForRegoVersion(ast.RegoV0, true, useModulePath) -} - -// FormatModulesForRegoVersion formats Rego modules to comply with a given Rego version -func (b *Bundle) FormatModulesForRegoVersion(version ast.RegoVersion, preserveModuleRegoVersion bool, useModulePath bool) error { - var err error - - for i, module := range b.Modules { - opts := format.Opts{} - if preserveModuleRegoVersion { - opts.RegoVersion = module.Parsed.RegoVersion() - opts.ParserOptions = &ast.ParserOptions{ - RegoVersion: opts.RegoVersion, - } - } else { - opts.RegoVersion = version - } - - if module.Raw == nil { - module.Raw, err = format.AstWithOpts(module.Parsed, opts) - if err != nil { - return err - } - } else { - path := module.URL - if useModulePath { - path = module.Path - } - - module.Raw, err = format.SourceWithOpts(path, module.Raw, opts) - if err != nil { - return err - } - } - b.Modules[i].Raw = module.Raw - } - return nil -} - -// GenerateSignature generates the signature for the given bundle. -func (b *Bundle) GenerateSignature(signingConfig *SigningConfig, keyID string, useModulePath bool) error { - - hash, err := NewSignatureHasher(HashingAlgorithm(defaultHashingAlg)) - if err != nil { - return err - } - - files := []FileInfo{} - - for _, module := range b.Modules { - bytes, err := hash.HashFile(module.Raw) - if err != nil { - return err - } - - path := module.URL - if useModulePath { - path = module.Path - } - files = append(files, NewFile(strings.TrimPrefix(path, "/"), hex.EncodeToString(bytes), defaultHashingAlg)) - } - - result, err := hashBundleFiles(hash, b) - if err != nil { - return err - } - files = append(files, result...) - - // generate signed token - token, err := GenerateSignedToken(files, signingConfig, keyID) - if err != nil { - return err - } - - if b.Signatures.isEmpty() { - b.Signatures = SignaturesConfig{} - } - - if signingConfig.Plugin != "" { - b.Signatures.Plugin = signingConfig.Plugin - } - - b.Signatures.Signatures = []string{token} - - return nil -} - -// ParsedModules returns a map of parsed modules with names that are -// unique and human readable for the given a bundle name. 
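The signing flow removed above hashes each file, collects the digests into a file list, and signs that list as a JWT. It remains reachable through the retained v0 surface; a sketch, assuming privPEM holds a PEM-encoded RSA private key, b is a populated Bundle, and the key ID "mykey" is illustrative:

	sc := bundle.NewSigningConfig(privPEM, "RS256", "")
	if err := b.GenerateSignature(sc, "mykey", false); err != nil {
		return err
	}
	// b.Signatures.Signatures now carries the signed token.

ParsedModules, whose v0 naming contract is described above, follows: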
-func (b *Bundle) ParsedModules(bundleName string) map[string]*ast.Module { - - mods := make(map[string]*ast.Module, len(b.Modules)) - - for _, mf := range b.Modules { - mods[modulePathWithPrefix(bundleName, mf.Path)] = mf.Parsed - } - - return mods -} - -func (b *Bundle) RegoVersion(def ast.RegoVersion) ast.RegoVersion { - if v := b.Manifest.RegoVersion; v != nil { - if *v == 0 { - return ast.RegoV0 - } else if *v == 1 { - return ast.RegoV1 - } - } - return def -} - -func (b *Bundle) SetRegoVersion(v ast.RegoVersion) { - b.Manifest.SetRegoVersion(v) -} - -// RegoVersionForFile returns the rego-version for the specified file path. -// If there is no defined version for the given path, the default version def is returned. -// If the version does not correspond to ast.RegoV0 or ast.RegoV1, an error is returned. -func (b *Bundle) RegoVersionForFile(path string, def ast.RegoVersion) (ast.RegoVersion, error) { - version, err := b.Manifest.numericRegoVersionForFile(path) - if err != nil { - return def, err - } else if version == nil { - return def, nil - } else if *version == 0 { - return ast.RegoV0, nil - } else if *version == 1 { - return ast.RegoV1, nil - } - return def, fmt.Errorf("unknown bundle rego-version %d for file '%s'", *version, path) -} - -func (m *Manifest) numericRegoVersionForFile(path string) (*int, error) { - var version *int - - if len(m.FileRegoVersions) != len(m.compiledFileRegoVersions) { - m.compiledFileRegoVersions = make([]fileRegoVersion, 0, len(m.FileRegoVersions)) - for pattern, v := range m.FileRegoVersions { - compiled, err := glob.Compile(pattern) - if err != nil { - return nil, fmt.Errorf("failed to compile glob pattern %s: %s", pattern, err) - } - m.compiledFileRegoVersions = append(m.compiledFileRegoVersions, fileRegoVersion{compiled, v}) - } - } - - for _, fv := range m.compiledFileRegoVersions { - if fv.path.Match(path) { - version = &fv.version - break - } - } - - if version == nil { - version = m.RegoVersion - } - return version, nil -} - -// Equal returns true if this bundle's contents equal the other bundle's -// contents. -func (b Bundle) Equal(other Bundle) bool { - if !reflect.DeepEqual(b.Data, other.Data) { - return false - } - - if len(b.Modules) != len(other.Modules) { - return false - } - for i := range b.Modules { - // To support bundles built from rootless filesystems we ignore a "/" prefix - // for URLs and Paths, such that "/file" and "file" are equivalent - if strings.TrimPrefix(b.Modules[i].URL, string(filepath.Separator)) != - strings.TrimPrefix(other.Modules[i].URL, string(filepath.Separator)) { - return false - } - if strings.TrimPrefix(b.Modules[i].Path, string(filepath.Separator)) != - strings.TrimPrefix(other.Modules[i].Path, string(filepath.Separator)) { - return false - } - if !b.Modules[i].Parsed.Equal(other.Modules[i].Parsed) { - return false - } - if !bytes.Equal(b.Modules[i].Raw, other.Modules[i].Raw) { - return false - } - } - if (b.Wasm == nil && other.Wasm != nil) || (b.Wasm != nil && other.Wasm == nil) { - return false - } - - return bytes.Equal(b.Wasm, other.Wasm) -} - -// Copy returns a deep copy of the bundle. -func (b Bundle) Copy() Bundle { - - // Copy data. - var x interface{} = b.Data - - if err := util.RoundTrip(&x); err != nil { - panic(err) - } - - if x != nil { - b.Data = x.(map[string]interface{}) - } - - // Copy modules. 
- for i := range b.Modules { - bs := make([]byte, len(b.Modules[i].Raw)) - copy(bs, b.Modules[i].Raw) - b.Modules[i].Raw = bs - b.Modules[i].Parsed = b.Modules[i].Parsed.Copy() - } - - // Copy manifest. - b.Manifest = b.Manifest.Copy() - - return b -} - -func (b *Bundle) insertData(key []string, value interface{}) error { - // Build an object with the full structure for the value - obj, err := mktree(key, value) - if err != nil { - return err - } - - // Merge the new data in with the current bundle data object - merged, ok := merge.InterfaceMaps(b.Data, obj) - if !ok { - return fmt.Errorf("failed to insert data file from path %s", filepath.Join(key...)) - } - - b.Data = merged - - return nil -} - -func (b *Bundle) readData(key []string) *interface{} { - - if len(key) == 0 { - if len(b.Data) == 0 { - return nil - } - var result interface{} = b.Data - return &result - } - - node := b.Data - - for i := 0; i < len(key)-1; i++ { - - child, ok := node[key[i]] - if !ok { - return nil - } - - childObj, ok := child.(map[string]interface{}) - if !ok { - return nil - } - - node = childObj - } - - child, ok := node[key[len(key)-1]] - if !ok { - return nil - } - - return &child -} - -// Type returns the type of the bundle. -func (b *Bundle) Type() string { - if len(b.Patch.Data) != 0 { - return DeltaBundleType - } - return SnapshotBundleType -} - -func mktree(path []string, value interface{}) (map[string]interface{}, error) { - if len(path) == 0 { - // For 0 length path the value is the full tree. - obj, ok := value.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("root value must be object") - } - return obj, nil - } - - dir := map[string]interface{}{} - for i := len(path) - 1; i > 0; i-- { - dir[path[i]] = value - value = dir - dir = map[string]interface{}{} - } - dir[path[0]] = value - - return dir, nil + return v1.NewWriter(w) } // Merge accepts a set of bundles and merges them into a single result bundle. If there are @@ -1393,7 +104,7 @@ func mktree(path []string, value interface{}) (map[string]interface{}, error) { // will have an empty revision except in the special case where a single bundle is provided // (and in that case the bundle is just returned unmodified.) func Merge(bundles []*Bundle) (*Bundle, error) { - return MergeWithRegoVersion(bundles, ast.RegoV0, false) + return MergeWithRegoVersion(bundles, ast.DefaultRegoVersion, false) } // MergeWithRegoVersion creates a merged bundle from the provided bundles, similar to Merge. @@ -1405,348 +116,19 @@ func Merge(bundles []*Bundle) (*Bundle, error) { // If usePath is true, per-file rego-versions will be calculated using the file's ModuleFile.Path; otherwise, the file's // ModuleFile.URL will be used. func MergeWithRegoVersion(bundles []*Bundle, regoVersion ast.RegoVersion, usePath bool) (*Bundle, error) { - - if len(bundles) == 0 { - return nil, errors.New("expected at least one bundle") - } - - if len(bundles) == 1 { - result := bundles[0] - // We respect the bundle rego-version, defaulting to the provided rego version if not set. 
- result.SetRegoVersion(result.RegoVersion(regoVersion)) - fileRegoVersions, err := bundleRegoVersions(result, result.RegoVersion(regoVersion), usePath) - if err != nil { - return nil, err - } - result.Manifest.FileRegoVersions = fileRegoVersions - return result, nil + if regoVersion == ast.RegoUndefined { + regoVersion = ast.DefaultRegoVersion } - var roots []string - var result Bundle - - for _, b := range bundles { - - if b.Manifest.Roots == nil { - return nil, errors.New("bundle manifest not initialized") - } - - roots = append(roots, *b.Manifest.Roots...) - - result.Modules = append(result.Modules, b.Modules...) - - for _, root := range *b.Manifest.Roots { - key := strings.Split(root, "/") - if val := b.readData(key); val != nil { - if err := result.insertData(key, *val); err != nil { - return nil, err - } - } - } - - result.Manifest.WasmResolvers = append(result.Manifest.WasmResolvers, b.Manifest.WasmResolvers...) - result.WasmModules = append(result.WasmModules, b.WasmModules...) - result.PlanModules = append(result.PlanModules, b.PlanModules...) - - if b.Manifest.RegoVersion != nil || len(b.Manifest.FileRegoVersions) > 0 { - if result.Manifest.FileRegoVersions == nil { - result.Manifest.FileRegoVersions = map[string]int{} - } - - fileRegoVersions, err := bundleRegoVersions(b, regoVersion, usePath) - if err != nil { - return nil, err - } - for k, v := range fileRegoVersions { - result.Manifest.FileRegoVersions[k] = v - } - } - } - - // We respect the bundle rego-version, defaulting to the provided rego version if not set. - result.SetRegoVersion(result.RegoVersion(regoVersion)) - - if result.Data == nil { - result.Data = map[string]interface{}{} - } - - result.Manifest.Roots = &roots - - if err := result.Manifest.validateAndInjectDefaults(result); err != nil { - return nil, err - } - - return &result, nil -} - -func bundleRegoVersions(bundle *Bundle, regoVersion ast.RegoVersion, usePath bool) (map[string]int, error) { - fileRegoVersions := map[string]int{} - - // we drop the bundle-global rego versions and record individual rego versions for each module. - for _, m := range bundle.Modules { - // We fetch rego-version by the path relative to the bundle root, as the complete path of the module might - // contain the path between OPA working directory and the bundle root. - v, err := bundle.RegoVersionForFile(bundleRelativePath(m, usePath), bundle.RegoVersion(regoVersion)) - if err != nil { - return nil, err - } - // only record the rego version if it's different from one applied globally to the result bundle - if v != regoVersion { - // We store the rego version by the absolute path to the bundle root, as this will be the - possibly new - path - // to the module inside the merged bundle. - fileRegoVersions[bundleAbsolutePath(m, usePath)] = v.Int() - } - } - - return fileRegoVersions, nil -} - -func bundleRelativePath(m ModuleFile, usePath bool) string { - p := m.RelativePath - if p == "" { - if usePath { - p = m.Path - } else { - p = m.URL - } - } - return p -} - -func bundleAbsolutePath(m ModuleFile, usePath bool) string { - var p string - if usePath { - p = m.Path - } else { - p = m.URL - } - if !path.IsAbs(p) { - p = "/" + p - } - return path.Clean(p) + return v1.MergeWithRegoVersion(bundles, regoVersion, usePath) } // RootPathsOverlap takes in two bundle root paths and returns true if they overlap. 
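The behavioural delta is confined to the first lines of the new body: a caller passing ast.RegoUndefined now has ast.DefaultRegoVersion substituted before delegating, where the v0 implementation above performed its own defaulting and per-file version bookkeeping. A sketch of the common call, assuming b1 and b2 are bundles with initialized manifest roots:

	merged, err := bundle.MergeWithRegoVersion([]*bundle.Bundle{b1, b2}, ast.RegoUndefined, false)
	if err != nil {
		return err
	}
	// merged is a single bundle; per-file rego versions are recorded only
	// where they differ from the version applied to the result.
	_ = merged

RootPathsOverlap, described above, and RootPathsContain are forwarded one-for-one: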
func RootPathsOverlap(pathA string, pathB string) bool { - a := rootPathSegments(pathA) - b := rootPathSegments(pathB) - return rootContains(a, b) || rootContains(b, a) + return v1.RootPathsOverlap(pathA, pathB) } // RootPathsContain takes a set of bundle root paths and returns true if the path is contained. func RootPathsContain(roots []string, path string) bool { - segments := rootPathSegments(path) - for i := range roots { - if rootContains(rootPathSegments(roots[i]), segments) { - return true - } - } - return false -} - -func rootPathSegments(path string) []string { - return strings.Split(path, "/") -} - -func rootContains(root []string, other []string) bool { - - // A single segment, empty string root always contains the other. - if len(root) == 1 && root[0] == "" { - return true - } - - if len(root) > len(other) { - return false - } - - for j := range root { - if root[j] != other[j] { - return false - } - } - - return true -} - -func insertValue(b *Bundle, path string, value interface{}) error { - if err := b.insertData(getNormalizedPath(path), value); err != nil { - return fmt.Errorf("bundle load failed on %v: %w", path, err) - } - return nil -} - -func getNormalizedPath(path string) []string { - // Remove leading / and . characters from the directory path. If the bundle - // was written with OPA then the paths will contain a leading slash. On the - // other hand, if the path is empty, filepath.Dir will return '.'. - // Note: filepath.Dir can return paths with '\' separators, always use - // filepath.ToSlash to keep them normalized. - dirpath := strings.TrimLeft(normalizePath(filepath.Dir(path)), "/.") - var key []string - if dirpath != "" { - key = strings.Split(dirpath, "/") - } - return key -} - -func dfs(value interface{}, path string, fn func(string, interface{}) (bool, error)) error { - if stop, err := fn(path, value); err != nil { - return err - } else if stop { - return nil - } - obj, ok := value.(map[string]interface{}) - if !ok { - return nil - } - for key := range obj { - if err := dfs(obj[key], path+"/"+key, fn); err != nil { - return err - } - } - return nil -} - -func modulePathWithPrefix(bundleName string, modulePath string) string { - // Default prefix is just the bundle name - prefix := bundleName - - // Bundle names are sometimes just file paths, some of which - // are full urls (file:///foo/). Parse these and only use the path. - parsed, err := url.Parse(bundleName) - if err == nil { - prefix = filepath.Join(parsed.Host, parsed.Path) - } - - // Note: filepath.Join can return paths with '\' separators, always use - // filepath.ToSlash to keep them normalized. - return normalizePath(filepath.Join(prefix, modulePath)) -} - -// IsStructuredDoc checks if the file name equals a structured file extension ex. 
".json" -func IsStructuredDoc(name string) bool { - return filepath.Base(name) == dataFile || filepath.Base(name) == yamlDataFile || - filepath.Base(name) == SignaturesFile || filepath.Base(name) == ManifestExt -} - -func preProcessBundle(loader DirectoryLoader, skipVerify bool, sizeLimitBytes int64) (SignaturesConfig, Patch, []*Descriptor, error) { - descriptors := []*Descriptor{} - var signatures SignaturesConfig - var patch Patch - - for { - f, err := loader.NextFile() - if err == io.EOF { - break - } - - if err != nil { - return signatures, patch, nil, fmt.Errorf("bundle read failed: %w", err) - } - - // check for the signatures file - if !skipVerify && strings.HasSuffix(f.Path(), SignaturesFile) { - buf, err := readFile(f, sizeLimitBytes) - if err != nil { - return signatures, patch, nil, err - } - - if err := util.NewJSONDecoder(&buf).Decode(&signatures); err != nil { - return signatures, patch, nil, fmt.Errorf("bundle load failed on signatures decode: %w", err) - } - } else if !strings.HasSuffix(f.Path(), SignaturesFile) { - descriptors = append(descriptors, f) - - if filepath.Base(f.Path()) == patchFile { - - var b bytes.Buffer - tee := io.TeeReader(f.reader, &b) - f.reader = tee - - buf, err := readFile(f, sizeLimitBytes) - if err != nil { - return signatures, patch, nil, err - } - - if err := util.NewJSONDecoder(&buf).Decode(&patch); err != nil { - return signatures, patch, nil, fmt.Errorf("bundle load failed on patch decode: %w", err) - } - - f.reader = &b - } - } - } - return signatures, patch, descriptors, nil -} - -func readFile(f *Descriptor, sizeLimitBytes int64) (bytes.Buffer, error) { - // Case for pre-loaded byte buffers, like those from the tarballLoader. - if bb, ok := f.reader.(*bytes.Buffer); ok { - _ = f.Close() // always close, even on error - - if int64(bb.Len()) >= sizeLimitBytes { - return *bb, fmt.Errorf("bundle file '%v' size (%d bytes) exceeded max size (%v bytes)", - strings.TrimPrefix(f.Path(), "/"), bb.Len(), sizeLimitBytes-1) - } - - return *bb, nil - } - - // Case for *lazyFile readers: - if lf, ok := f.reader.(*lazyFile); ok { - var buf bytes.Buffer - if lf.file == nil { - var err error - if lf.file, err = os.Open(lf.path); err != nil { - return buf, fmt.Errorf("failed to open file %s: %w", f.path, err) - } - } - // Bail out if we can't read the whole file-- there's nothing useful we can do at that point! - fileSize, _ := fstatFileSize(lf.file) - if fileSize > sizeLimitBytes { - return buf, fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(f.Path(), "/"), fileSize, sizeLimitBytes-1) - } - // Prealloc the buffer for the file read. - buffer := make([]byte, fileSize) - _, err := io.ReadFull(lf.file, buffer) - if err != nil { - return buf, err - } - _ = lf.file.Close() // always close, even on error - - // Note(philipc): Replace the lazyFile reader in the *Descriptor with a - // pointer to the wrapping bytes.Buffer, so that we don't re-read the - // file on disk again by accident. - buf = *bytes.NewBuffer(buffer) - f.reader = &buf - return buf, nil - } - - // Fallback case: - var buf bytes.Buffer - n, err := f.Read(&buf, sizeLimitBytes) - _ = f.Close() // always close, even on error - - if err != nil && err != io.EOF { - return buf, err - } else if err == nil && n >= sizeLimitBytes { - return buf, fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(f.Path(), "/"), n, sizeLimitBytes-1) - } - - return buf, nil -} - -// Takes an already open file handle and invokes the os.Stat system call on it -// to determine the file's size. 
Passes any errors from *File.Stat on up to the -// caller. -func fstatFileSize(f *os.File) (int64, error) { - fileInfo, err := f.Stat() - if err != nil { - return 0, err - } - return fileInfo.Size(), nil -} - -func normalizePath(p string) string { - return filepath.ToSlash(p) + return v1.RootPathsContain(roots, path) } diff --git a/vendor/github.com/open-policy-agent/opa/bundle/doc.go b/vendor/github.com/open-policy-agent/opa/bundle/doc.go new file mode 100644 index 00000000000..7ec7c9b3328 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/bundle/doc.go @@ -0,0 +1,8 @@ +// Copyright 2024 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. +package bundle diff --git a/vendor/github.com/open-policy-agent/opa/bundle/file.go b/vendor/github.com/open-policy-agent/opa/bundle/file.go index 80b1a87eb17..ccb7b235109 100644 --- a/vendor/github.com/open-policy-agent/opa/bundle/file.go +++ b/vendor/github.com/open-policy-agent/opa/bundle/file.go @@ -1,508 +1,50 @@ package bundle import ( - "archive/tar" - "bytes" - "compress/gzip" - "fmt" "io" - "io/fs" - "os" - "path/filepath" - "sort" - "strings" - "sync" - - "github.com/open-policy-agent/opa/loader/filter" "github.com/open-policy-agent/opa/storage" + v1 "github.com/open-policy-agent/opa/v1/bundle" ) -const maxSizeLimitBytesErrMsg = "bundle file %s size (%d bytes) exceeds configured size_limit_bytes (%d bytes)" - // Descriptor contains information about a file and // can be used to read the file contents. -type Descriptor struct { - url string - path string - reader io.Reader - closer io.Closer - closeOnce *sync.Once -} - -// lazyFile defers reading the file until the first call of Read -type lazyFile struct { - path string - file *os.File -} - -// newLazyFile creates a new instance of lazyFile -func newLazyFile(path string) *lazyFile { - return &lazyFile{path: path} -} - -// Read implements io.Reader. It will check if the file has been opened -// and open it if it has not before attempting to read using the file's -// read method -func (f *lazyFile) Read(b []byte) (int, error) { - var err error - - if f.file == nil { - if f.file, err = os.Open(f.path); err != nil { - return 0, fmt.Errorf("failed to open file %s: %w", f.path, err) - } - } - - return f.file.Read(b) -} - -// Close closes the lazy file if it has been opened using the file's -// close method -func (f *lazyFile) Close() error { - if f.file != nil { - return f.file.Close() - } - - return nil -} +type Descriptor = v1.Descriptor func NewDescriptor(url, path string, reader io.Reader) *Descriptor { - return &Descriptor{ - url: url, - path: path, - reader: reader, - } -} - -func (d *Descriptor) WithCloser(closer io.Closer) *Descriptor { - d.closer = closer - d.closeOnce = new(sync.Once) - return d -} - -// Path returns the path of the file. -func (d *Descriptor) Path() string { - return d.path -} - -// URL returns the url of the file. 
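The new doc.go above spells out the migration path; in practice it is an import swap, since the exported names are re-declared in this package as aliases or thin wrappers. A sketch:

	// v0 (compatibility shim, kept for the lifetime of OPA 1.x):
	import "github.com/open-policy-agent/opa/bundle"

	// recommended for new code:
	import v1 "github.com/open-policy-agent/opa/v1/bundle"

Descriptor above is one example: the struct and its accessors collapse into an alias and thin constructors, continuing below: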
-func (d *Descriptor) URL() string { - return d.url -} - -// Read will read all the contents from the file the Descriptor refers to -// into the dest writer up n bytes. Will return an io.EOF error -// if EOF is encountered before n bytes are read. -func (d *Descriptor) Read(dest io.Writer, n int64) (int64, error) { - n, err := io.CopyN(dest, d.reader, n) - return n, err + return v1.NewDescriptor(url, path, reader) } -// Close the file, on some Loader implementations this might be a no-op. -// It should *always* be called regardless of file. -func (d *Descriptor) Close() error { - var err error - if d.closer != nil { - d.closeOnce.Do(func() { - err = d.closer.Close() - }) - } - return err -} - -type PathFormat int64 +type PathFormat = v1.PathFormat const ( - Chrooted PathFormat = iota - SlashRooted - Passthrough + Chrooted = v1.Chrooted + SlashRooted = v1.SlashRooted + Passthrough = v1.Passthrough ) // DirectoryLoader defines an interface which can be used to load // files from a directory by iterating over each one in the tree. -type DirectoryLoader interface { - // NextFile must return io.EOF if there is no next value. The returned - // descriptor should *always* be closed when no longer needed. - NextFile() (*Descriptor, error) - WithFilter(filter filter.LoaderFilter) DirectoryLoader - WithPathFormat(PathFormat) DirectoryLoader - WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader - WithFollowSymlinks(followSymlinks bool) DirectoryLoader -} - -type dirLoader struct { - root string - files []string - idx int - filter filter.LoaderFilter - pathFormat PathFormat - maxSizeLimitBytes int64 - followSymlinks bool -} - -// Normalize root directory, ex "./src/bundle" -> "src/bundle" -// We don't need an absolute path, but this makes the joined/trimmed -// paths more uniform. -func normalizeRootDirectory(root string) string { - if len(root) > 1 { - if root[0] == '.' && root[1] == filepath.Separator { - if len(root) == 2 { - root = root[:1] // "./" -> "." - } else { - root = root[2:] // remove leading "./" - } - } - } - return root -} +type DirectoryLoader = v1.DirectoryLoader // NewDirectoryLoader returns a basic DirectoryLoader implementation // that will load files from a given root directory path. 
func NewDirectoryLoader(root string) DirectoryLoader { - d := dirLoader{ - root: normalizeRootDirectory(root), - pathFormat: Chrooted, - } - return &d -} - -// WithFilter specifies the filter object to use to filter files while loading bundles -func (d *dirLoader) WithFilter(filter filter.LoaderFilter) DirectoryLoader { - d.filter = filter - return d -} - -// WithPathFormat specifies how a path is formatted in a Descriptor -func (d *dirLoader) WithPathFormat(pathFormat PathFormat) DirectoryLoader { - d.pathFormat = pathFormat - return d -} - -// WithSizeLimitBytes specifies the maximum size of any file in the directory to read -func (d *dirLoader) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader { - d.maxSizeLimitBytes = sizeLimitBytes - return d -} - -// WithFollowSymlinks specifies whether to follow symlinks when loading files from the directory -func (d *dirLoader) WithFollowSymlinks(followSymlinks bool) DirectoryLoader { - d.followSymlinks = followSymlinks - return d -} - -func formatPath(fileName string, root string, pathFormat PathFormat) string { - switch pathFormat { - case SlashRooted: - if !strings.HasPrefix(fileName, string(filepath.Separator)) { - return string(filepath.Separator) + fileName - } - return fileName - case Chrooted: - // Trim off the root directory and return path as if chrooted - result := strings.TrimPrefix(fileName, filepath.FromSlash(root)) - if root == "." && filepath.Base(fileName) == ManifestExt { - result = fileName - } - if !strings.HasPrefix(result, string(filepath.Separator)) { - result = string(filepath.Separator) + result - } - return result - case Passthrough: - fallthrough - default: - return fileName - } -} - -// NextFile iterates to the next file in the directory tree -// and returns a file Descriptor for the file. 
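The DirectoryLoader contract is unchanged by the aliasing: NextFile returns io.EOF once the tree is exhausted, and every returned Descriptor must be closed. A usage sketch against the constructors kept above (the "./policies" path is illustrative; the loop runs inside a function returning error):

	dl := bundle.NewDirectoryLoader("./policies")
	for {
		d, err := dl.NextFile()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		// ... consume d via d.Path() and d.Read(...) ...
		_ = d.Close() // always close, even on error
	}

The removed v0 walker below is what v1 now provides: a lazy, one-time filepath.Walk with filtering, size limits, and optional symlink following: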
-func (d *dirLoader) NextFile() (*Descriptor, error) { - // build a list of all files we will iterate over and read, but only one time - if d.files == nil { - d.files = []string{} - err := filepath.Walk(d.root, func(path string, info os.FileInfo, _ error) error { - if info == nil { - return nil - } - - if info.Mode().IsRegular() { - if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { - return nil - } - if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes { - return fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(path, "/"), info.Size(), d.maxSizeLimitBytes) - } - d.files = append(d.files, path) - } else if d.followSymlinks && info.Mode().Type()&fs.ModeSymlink == fs.ModeSymlink { - if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { - return nil - } - if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes { - return fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(path, "/"), info.Size(), d.maxSizeLimitBytes) - } - d.files = append(d.files, path) - } else if info.Mode().IsDir() { - if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, true)) { - return filepath.SkipDir - } - } - return nil - }) - if err != nil { - return nil, fmt.Errorf("failed to list files: %w", err) - } - } - - // If done reading files then just return io.EOF - // errors for each NextFile() call - if d.idx >= len(d.files) { - return nil, io.EOF - } - - fileName := d.files[d.idx] - d.idx++ - fh := newLazyFile(fileName) - - cleanedPath := formatPath(fileName, d.root, d.pathFormat) - f := NewDescriptor(filepath.Join(d.root, cleanedPath), cleanedPath, fh).WithCloser(fh) - return f, nil -} - -type tarballLoader struct { - baseURL string - r io.Reader - tr *tar.Reader - files []file - idx int - filter filter.LoaderFilter - skipDir map[string]struct{} - pathFormat PathFormat - maxSizeLimitBytes int64 -} - -type file struct { - name string - reader io.Reader - path storage.Path - raw []byte + return v1.NewDirectoryLoader(root) } // NewTarballLoader is deprecated. Use NewTarballLoaderWithBaseURL instead. func NewTarballLoader(r io.Reader) DirectoryLoader { - l := tarballLoader{ - r: r, - pathFormat: Passthrough, - } - return &l + return v1.NewTarballLoader(r) } // NewTarballLoaderWithBaseURL returns a new DirectoryLoader that reads // files out of a gzipped tar archive. The file URLs will be prefixed // with the baseURL. func NewTarballLoaderWithBaseURL(r io.Reader, baseURL string) DirectoryLoader { - l := tarballLoader{ - baseURL: strings.TrimSuffix(baseURL, "/"), - r: r, - pathFormat: Passthrough, - } - return &l -} - -// WithFilter specifies the filter object to use to filter files while loading bundles -func (t *tarballLoader) WithFilter(filter filter.LoaderFilter) DirectoryLoader { - t.filter = filter - return t -} - -// WithPathFormat specifies how a path is formatted in a Descriptor -func (t *tarballLoader) WithPathFormat(pathFormat PathFormat) DirectoryLoader { - t.pathFormat = pathFormat - return t -} - -// WithSizeLimitBytes specifies the maximum size of any file in the tarball to read -func (t *tarballLoader) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader { - t.maxSizeLimitBytes = sizeLimitBytes - return t -} - -// WithFollowSymlinks is a no-op for tarballLoader -func (t *tarballLoader) WithFollowSymlinks(_ bool) DirectoryLoader { - return t -} - -// NextFile iterates to the next file in the directory tree -// and returns a file Descriptor for the file. 
-func (t *tarballLoader) NextFile() (*Descriptor, error) { - if t.tr == nil { - gr, err := gzip.NewReader(t.r) - if err != nil { - return nil, fmt.Errorf("archive read failed: %w", err) - } - - t.tr = tar.NewReader(gr) - } - - if t.files == nil { - t.files = []file{} - - if t.skipDir == nil { - t.skipDir = map[string]struct{}{} - } - - for { - header, err := t.tr.Next() - - if err == io.EOF { - break - } - - if err != nil { - return nil, err - } - - // Keep iterating on the archive until we find a normal file - if header.Typeflag == tar.TypeReg { - - if t.filter != nil { - - if t.filter(filepath.ToSlash(header.Name), header.FileInfo(), getdepth(header.Name, false)) { - continue - } - - basePath := strings.Trim(filepath.Dir(filepath.ToSlash(header.Name)), "/") - - // check if the directory is to be skipped - if _, ok := t.skipDir[basePath]; ok { - continue - } - - match := false - for p := range t.skipDir { - if strings.HasPrefix(basePath, p) { - match = true - break - } - } - - if match { - continue - } - } - - if t.maxSizeLimitBytes > 0 && header.Size > t.maxSizeLimitBytes { - return nil, fmt.Errorf(maxSizeLimitBytesErrMsg, header.Name, header.Size, t.maxSizeLimitBytes) - } - - f := file{name: header.Name} - - // Note(philipc): We rely on the previous size check in this loop for safety. - buf := bytes.NewBuffer(make([]byte, 0, header.Size)) - if _, err := io.Copy(buf, t.tr); err != nil { - return nil, fmt.Errorf("failed to copy file %s: %w", header.Name, err) - } - - f.reader = buf - - t.files = append(t.files, f) - } else if header.Typeflag == tar.TypeDir { - cleanedPath := filepath.ToSlash(header.Name) - if t.filter != nil && t.filter(cleanedPath, header.FileInfo(), getdepth(header.Name, true)) { - t.skipDir[strings.Trim(cleanedPath, "/")] = struct{}{} - } - } - } - } - - // If done reading files then just return io.EOF - // errors for each NextFile() call - if t.idx >= len(t.files) { - return nil, io.EOF - } - - f := t.files[t.idx] - t.idx++ - - cleanedPath := formatPath(f.name, "", t.pathFormat) - d := NewDescriptor(filepath.Join(t.baseURL, cleanedPath), cleanedPath, f.reader) - return d, nil -} - -// Next implements the storage.Iterator interface. -// It iterates to the next policy or data file in the directory tree -// and returns a storage.Update for the file. 
-func (it *iterator) Next() (*storage.Update, error) { - if it.files == nil { - it.files = []file{} - - for _, item := range it.raw { - f := file{name: item.Path} - - fpath := strings.TrimLeft(normalizePath(filepath.Dir(f.name)), "/.") - if strings.HasSuffix(f.name, RegoExt) { - fpath = strings.Trim(normalizePath(f.name), "/") - } - - p, ok := storage.ParsePathEscaped("/" + fpath) - if !ok { - return nil, fmt.Errorf("storage path invalid: %v", f.name) - } - f.path = p - - f.raw = item.Value - - it.files = append(it.files, f) - } - - sortFilePathAscend(it.files) - } - - // If done reading files then just return io.EOF - // errors for each NextFile() call - if it.idx >= len(it.files) { - return nil, io.EOF - } - - f := it.files[it.idx] - it.idx++ - - isPolicy := false - if strings.HasSuffix(f.name, RegoExt) { - isPolicy = true - } - - return &storage.Update{ - Path: f.path, - Value: f.raw, - IsPolicy: isPolicy, - }, nil -} - -type iterator struct { - raw []Raw - files []file - idx int + return v1.NewTarballLoaderWithBaseURL(r, baseURL) } func NewIterator(raw []Raw) storage.Iterator { - it := iterator{ - raw: raw, - } - return &it -} - -func sortFilePathAscend(files []file) { - sort.Slice(files, func(i, j int) bool { - return len(files[i].path) < len(files[j].path) - }) -} - -func getdepth(path string, isDir bool) int { - if isDir { - cleanedPath := strings.Trim(filepath.ToSlash(path), "/") - return len(strings.Split(cleanedPath, "/")) - } - - basePath := strings.Trim(filepath.Dir(filepath.ToSlash(path)), "/") - return len(strings.Split(basePath, "/")) + return v1.NewIterator(raw) } diff --git a/vendor/github.com/open-policy-agent/opa/bundle/filefs.go b/vendor/github.com/open-policy-agent/opa/bundle/filefs.go index a3a0dbf2040..16e00928dad 100644 --- a/vendor/github.com/open-policy-agent/opa/bundle/filefs.go +++ b/vendor/github.com/open-policy-agent/opa/bundle/filefs.go @@ -4,140 +4,19 @@ package bundle import ( - "fmt" - "io" "io/fs" - "path/filepath" - "sync" - "github.com/open-policy-agent/opa/loader/filter" + v1 "github.com/open-policy-agent/opa/v1/bundle" ) -const ( - defaultFSLoaderRoot = "." 
-) - -type dirLoaderFS struct { - sync.Mutex - filesystem fs.FS - files []string - idx int - filter filter.LoaderFilter - root string - pathFormat PathFormat - maxSizeLimitBytes int64 - followSymlinks bool -} - // NewFSLoader returns a basic DirectoryLoader implementation // that will load files from a fs.FS interface func NewFSLoader(filesystem fs.FS) (DirectoryLoader, error) { - return NewFSLoaderWithRoot(filesystem, defaultFSLoaderRoot), nil + return v1.NewFSLoader(filesystem) } // NewFSLoaderWithRoot returns a basic DirectoryLoader implementation // that will load files from a fs.FS interface at the supplied root func NewFSLoaderWithRoot(filesystem fs.FS, root string) DirectoryLoader { - d := dirLoaderFS{ - filesystem: filesystem, - root: normalizeRootDirectory(root), - pathFormat: Chrooted, - } - - return &d -} - -func (d *dirLoaderFS) walkDir(path string, dirEntry fs.DirEntry, err error) error { - if err != nil { - return err - } - - if dirEntry != nil { - info, err := dirEntry.Info() - if err != nil { - return err - } - - if dirEntry.Type().IsRegular() { - if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { - return nil - } - - if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes { - return fmt.Errorf("file %s size %d exceeds limit of %d", path, info.Size(), d.maxSizeLimitBytes) - } - - d.files = append(d.files, path) - } else if dirEntry.Type()&fs.ModeSymlink != 0 && d.followSymlinks { - if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { - return nil - } - - if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes { - return fmt.Errorf("file %s size %d exceeds limit of %d", path, info.Size(), d.maxSizeLimitBytes) - } - - d.files = append(d.files, path) - } else if dirEntry.Type().IsDir() { - if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, true)) { - return fs.SkipDir - } - } - } - return nil -} - -// WithFilter specifies the filter object to use to filter files while loading bundles -func (d *dirLoaderFS) WithFilter(filter filter.LoaderFilter) DirectoryLoader { - d.filter = filter - return d -} - -// WithPathFormat specifies how a path is formatted in a Descriptor -func (d *dirLoaderFS) WithPathFormat(pathFormat PathFormat) DirectoryLoader { - d.pathFormat = pathFormat - return d -} - -// WithSizeLimitBytes specifies the maximum size of any file in the filesystem directory to read -func (d *dirLoaderFS) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader { - d.maxSizeLimitBytes = sizeLimitBytes - return d -} - -func (d *dirLoaderFS) WithFollowSymlinks(followSymlinks bool) DirectoryLoader { - d.followSymlinks = followSymlinks - return d -} - -// NextFile iterates to the next file in the directory tree -// and returns a file Descriptor for the file. 
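NewFSLoader makes embedded bundles straightforward. A sketch, assuming a hypothetical embedded directory named policies and imports of embed and the bundle package:

	//go:embed policies
	var policiesFS embed.FS

	func loadEmbedded() (bundle.Bundle, error) {
		dl, err := bundle.NewFSLoader(policiesFS)
		if err != nil {
			return bundle.Bundle{}, err
		}
		return bundle.NewCustomReader(dl).Read()
	}

The removed fs.WalkDir-based implementation below mirrors the directory loader, adding a mutex around its lazy file discovery: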
-func (d *dirLoaderFS) NextFile() (*Descriptor, error) { - d.Lock() - defer d.Unlock() - - if d.files == nil { - err := fs.WalkDir(d.filesystem, d.root, d.walkDir) - if err != nil { - return nil, fmt.Errorf("failed to list files: %w", err) - } - } - - // If done reading files then just return io.EOF - // errors for each NextFile() call - if d.idx >= len(d.files) { - return nil, io.EOF - } - - fileName := d.files[d.idx] - d.idx++ - - fh, err := d.filesystem.Open(fileName) - if err != nil { - return nil, fmt.Errorf("failed to open file %s: %w", fileName, err) - } - - cleanedPath := formatPath(fileName, d.root, d.pathFormat) - f := NewDescriptor(cleanedPath, cleanedPath, fh).WithCloser(fh) - return f, nil + return v1.NewFSLoaderWithRoot(filesystem, root) } diff --git a/vendor/github.com/open-policy-agent/opa/bundle/hash.go b/vendor/github.com/open-policy-agent/opa/bundle/hash.go index 021801bb0aa..d4cc601dead 100644 --- a/vendor/github.com/open-policy-agent/opa/bundle/hash.go +++ b/vendor/github.com/open-policy-agent/opa/bundle/hash.go @@ -5,137 +5,28 @@ package bundle import ( - "bytes" - "crypto/md5" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "encoding/json" - "fmt" - "hash" - "io" - "sort" - "strings" + v1 "github.com/open-policy-agent/opa/v1/bundle" ) // HashingAlgorithm represents a subset of hashing algorithms implemented in Go -type HashingAlgorithm string +type HashingAlgorithm = v1.HashingAlgorithm // Supported values for HashingAlgorithm const ( - MD5 HashingAlgorithm = "MD5" - SHA1 HashingAlgorithm = "SHA-1" - SHA224 HashingAlgorithm = "SHA-224" - SHA256 HashingAlgorithm = "SHA-256" - SHA384 HashingAlgorithm = "SHA-384" - SHA512 HashingAlgorithm = "SHA-512" - SHA512224 HashingAlgorithm = "SHA-512-224" - SHA512256 HashingAlgorithm = "SHA-512-256" + MD5 = v1.MD5 + SHA1 = v1.SHA1 + SHA224 = v1.SHA224 + SHA256 = v1.SHA256 + SHA384 = v1.SHA384 + SHA512 = v1.SHA512 + SHA512224 = v1.SHA512224 + SHA512256 = v1.SHA512256 ) -// String returns the string representation of a HashingAlgorithm -func (alg HashingAlgorithm) String() string { - return string(alg) -} - // SignatureHasher computes a signature digest for a file with (structured or unstructured) data and policy -type SignatureHasher interface { - HashFile(v interface{}) ([]byte, error) -} - -type hasher struct { - h func() hash.Hash // hash function factory -} +type SignatureHasher = v1.SignatureHasher // NewSignatureHasher returns a signature hasher suitable for a particular hashing algorithm func NewSignatureHasher(alg HashingAlgorithm) (SignatureHasher, error) { - h := &hasher{} - - switch alg { - case MD5: - h.h = md5.New - case SHA1: - h.h = sha1.New - case SHA224: - h.h = sha256.New224 - case SHA256: - h.h = sha256.New - case SHA384: - h.h = sha512.New384 - case SHA512: - h.h = sha512.New - case SHA512224: - h.h = sha512.New512_224 - case SHA512256: - h.h = sha512.New512_256 - default: - return nil, fmt.Errorf("unsupported hashing algorithm: %s", alg) - } - - return h, nil -} - -// HashFile hashes the file content, JSON or binary, both in golang native format. -func (h *hasher) HashFile(v interface{}) ([]byte, error) { - hf := h.h() - walk(v, hf) - return hf.Sum(nil), nil -} - -// walk hashes the file content, JSON or binary, both in golang native format. -// -// Computation for unstructured documents is a hash of the document. 
-// -// Computation for the types of structured JSON document is as follows: -// -// object: Hash {, then each key (in alphabetical order) and digest of the value, then comma (between items) and finally }. -// -// array: Hash [, then digest of the value, then comma (between items) and finally ]. -func walk(v interface{}, h io.Writer) { - - switch x := v.(type) { - case map[string]interface{}: - _, _ = h.Write([]byte("{")) - - var keys []string - for k := range x { - keys = append(keys, k) - } - sort.Strings(keys) - - for i, key := range keys { - if i > 0 { - _, _ = h.Write([]byte(",")) - } - - _, _ = h.Write(encodePrimitive(key)) - _, _ = h.Write([]byte(":")) - walk(x[key], h) - } - - _, _ = h.Write([]byte("}")) - case []interface{}: - _, _ = h.Write([]byte("[")) - - for i, e := range x { - if i > 0 { - _, _ = h.Write([]byte(",")) - } - walk(e, h) - } - - _, _ = h.Write([]byte("]")) - case []byte: - _, _ = h.Write(x) - default: - _, _ = h.Write(encodePrimitive(x)) - } -} - -func encodePrimitive(v interface{}) []byte { - var buf bytes.Buffer - encoder := json.NewEncoder(&buf) - encoder.SetEscapeHTML(false) - _ = encoder.Encode(v) - return []byte(strings.Trim(buf.String(), "\n")) + return v1.NewSignatureHasher(alg) } diff --git a/vendor/github.com/open-policy-agent/opa/bundle/keys.go b/vendor/github.com/open-policy-agent/opa/bundle/keys.go index 810bee4b726..99f9b0f165a 100644 --- a/vendor/github.com/open-policy-agent/opa/bundle/keys.go +++ b/vendor/github.com/open-policy-agent/opa/bundle/keys.go @@ -6,139 +6,25 @@ package bundle import ( - "encoding/pem" - "fmt" - "os" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" - "github.com/open-policy-agent/opa/internal/jwx/jws/sign" - "github.com/open-policy-agent/opa/keys" - - "github.com/open-policy-agent/opa/util" -) - -const ( - defaultTokenSigningAlg = "RS256" + v1 "github.com/open-policy-agent/opa/v1/bundle" ) // KeyConfig holds the keys used to sign or verify bundles and tokens // Moved to own package, alias kept for backwards compatibility -type KeyConfig = keys.Config +type KeyConfig = v1.KeyConfig // VerificationConfig represents the key configuration used to verify a signed bundle -type VerificationConfig struct { - PublicKeys map[string]*KeyConfig - KeyID string `json:"keyid"` - Scope string `json:"scope"` - Exclude []string `json:"exclude_files"` -} +type VerificationConfig = v1.VerificationConfig // NewVerificationConfig return a new VerificationConfig func NewVerificationConfig(keys map[string]*KeyConfig, id, scope string, exclude []string) *VerificationConfig { - return &VerificationConfig{ - PublicKeys: keys, - KeyID: id, - Scope: scope, - Exclude: exclude, - } -} - -// ValidateAndInjectDefaults validates the config and inserts default values -func (vc *VerificationConfig) ValidateAndInjectDefaults(keys map[string]*KeyConfig) error { - vc.PublicKeys = keys - - if vc.KeyID != "" { - found := false - for key := range keys { - if key == vc.KeyID { - found = true - break - } - } - - if !found { - return fmt.Errorf("key id %s not found", vc.KeyID) - } - } - return nil -} - -// GetPublicKey returns the public key corresponding to the given key id -func (vc *VerificationConfig) GetPublicKey(id string) (*KeyConfig, error) { - var kc *KeyConfig - var ok bool - - if kc, ok = vc.PublicKeys[id]; !ok { - return nil, fmt.Errorf("verification key corresponding to ID %v not found", id) - } - return kc, nil + return v1.NewVerificationConfig(keys, id, scope, exclude) } // SigningConfig represents the key configuration used to generate a 
signed bundle -type SigningConfig struct { - Plugin string - Key string - Algorithm string - ClaimsPath string -} +type SigningConfig = v1.SigningConfig // NewSigningConfig return a new SigningConfig func NewSigningConfig(key, alg, claimsPath string) *SigningConfig { - if alg == "" { - alg = defaultTokenSigningAlg - } - - return &SigningConfig{ - Plugin: defaultSignerID, - Key: key, - Algorithm: alg, - ClaimsPath: claimsPath, - } -} - -// WithPlugin sets the signing plugin in the signing config -func (s *SigningConfig) WithPlugin(plugin string) *SigningConfig { - if plugin != "" { - s.Plugin = plugin - } - return s -} - -// GetPrivateKey returns the private key or secret from the signing config -func (s *SigningConfig) GetPrivateKey() (interface{}, error) { - - block, _ := pem.Decode([]byte(s.Key)) - if block != nil { - return sign.GetSigningKey(s.Key, jwa.SignatureAlgorithm(s.Algorithm)) - } - - var priv string - if _, err := os.Stat(s.Key); err == nil { - bs, err := os.ReadFile(s.Key) - if err != nil { - return nil, err - } - priv = string(bs) - } else if os.IsNotExist(err) { - priv = s.Key - } else { - return nil, err - } - - return sign.GetSigningKey(priv, jwa.SignatureAlgorithm(s.Algorithm)) -} - -// GetClaims returns the claims by reading the file specified in the signing config -func (s *SigningConfig) GetClaims() (map[string]interface{}, error) { - var claims map[string]interface{} - - bs, err := os.ReadFile(s.ClaimsPath) - if err != nil { - return claims, err - } - - if err := util.UnmarshalJSON(bs, &claims); err != nil { - return claims, err - } - return claims, nil + return v1.NewSigningConfig(key, alg, claimsPath) } diff --git a/vendor/github.com/open-policy-agent/opa/bundle/sign.go b/vendor/github.com/open-policy-agent/opa/bundle/sign.go index cf9a3e183a9..56e25eec9c1 100644 --- a/vendor/github.com/open-policy-agent/opa/bundle/sign.go +++ b/vendor/github.com/open-policy-agent/opa/bundle/sign.go @@ -6,130 +6,30 @@ package bundle import ( - "crypto/rand" - "encoding/json" - "fmt" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" - "github.com/open-policy-agent/opa/internal/jwx/jws" + v1 "github.com/open-policy-agent/opa/v1/bundle" ) -const defaultSignerID = "_default" - -var signers map[string]Signer - // Signer is the interface expected for implementations that generate bundle signatures. -type Signer interface { - GenerateSignedToken([]FileInfo, *SigningConfig, string) (string, error) -} +type Signer v1.Signer // GenerateSignedToken will retrieve the Signer implementation based on the Plugin specified // in SigningConfig, and call its implementation of GenerateSignedToken. The signer generates // a signed token given the list of files to be included in the payload and the bundle // signing config. The keyID if non-empty, represents the value for the "keyid" claim in the token. func GenerateSignedToken(files []FileInfo, sc *SigningConfig, keyID string) (string, error) { - var plugin string - // for backwards compatibility, check if there is no plugin specified, and use default - if sc.Plugin == "" { - plugin = defaultSignerID - } else { - plugin = sc.Plugin - } - signer, err := GetSigner(plugin) - if err != nil { - return "", err - } - return signer.GenerateSignedToken(files, sc, keyID) + return v1.GenerateSignedToken(files, sc, keyID) } // DefaultSigner is the default bundle signing implementation. It signs bundles by generating // a JWT and signing it using a locally-accessible private key. 
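Since every exported name above is now an alias or thin wrapper over v1/bundle, existing v0 callers keep compiling unchanged. A small usage sketch; the key paths and the "global-key" id are placeholders:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/bundle" // v0 shim over v1/bundle
)

func main() {
	// The removed v0 code defaulted the algorithm to RS256 when alg == "";
	// the v1 constructor this delegates to is assumed to do the same.
	sc := bundle.NewSigningConfig("/path/to/private.pem", "", "")
	fmt.Println(sc.Algorithm)

	vc := bundle.NewVerificationConfig(
		map[string]*bundle.KeyConfig{
			"global-key": {Key: "/path/to/public.pem", Algorithm: "RS256"},
		},
		"global-key", // keyid used to verify .signatures.json
		"",           // scope
		nil,          // exclude_files
	)
	fmt.Println(vc.KeyID)
}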
-type DefaultSigner struct{} - -// GenerateSignedToken generates a signed token given the list of files to be -// included in the payload and the bundle signing config. The keyID if non-empty, -// represents the value for the "keyid" claim in the token -func (*DefaultSigner) GenerateSignedToken(files []FileInfo, sc *SigningConfig, keyID string) (string, error) { - payload, err := generatePayload(files, sc, keyID) - if err != nil { - return "", err - } - - privateKey, err := sc.GetPrivateKey() - if err != nil { - return "", err - } - - var headers jws.StandardHeaders - - if err := headers.Set(jws.AlgorithmKey, jwa.SignatureAlgorithm(sc.Algorithm)); err != nil { - return "", err - } - - if keyID != "" { - if err := headers.Set(jws.KeyIDKey, keyID); err != nil { - return "", err - } - } - - hdr, err := json.Marshal(headers) - if err != nil { - return "", err - } - - token, err := jws.SignLiteral(payload, - jwa.SignatureAlgorithm(sc.Algorithm), - privateKey, - hdr, - rand.Reader) - if err != nil { - return "", err - } - return string(token), nil -} - -func generatePayload(files []FileInfo, sc *SigningConfig, keyID string) ([]byte, error) { - payload := make(map[string]interface{}) - payload["files"] = files - - if sc.ClaimsPath != "" { - claims, err := sc.GetClaims() - if err != nil { - return nil, err - } - - for claim, value := range claims { - payload[claim] = value - } - } else { - if keyID != "" { - // keyid claim is deprecated but include it for backwards compatibility. - payload["keyid"] = keyID - } - } - return json.Marshal(payload) -} +type DefaultSigner v1.DefaultSigner // GetSigner returns the Signer registered under the given id func GetSigner(id string) (Signer, error) { - signer, ok := signers[id] - if !ok { - return nil, fmt.Errorf("no signer exists under id %s", id) - } - return signer, nil + return v1.GetSigner(id) } // RegisterSigner registers a Signer under the given id func RegisterSigner(id string, s Signer) error { - if id == defaultSignerID { - return fmt.Errorf("signer id %s is reserved, use a different id", id) - } - signers[id] = s - return nil -} - -func init() { - signers = map[string]Signer{ - defaultSignerID: &DefaultSigner{}, - } + return v1.RegisterSigner(id, s) } diff --git a/vendor/github.com/open-policy-agent/opa/bundle/store.go b/vendor/github.com/open-policy-agent/opa/bundle/store.go index 9a49f025e8e..d73cc774225 100644 --- a/vendor/github.com/open-policy-agent/opa/bundle/store.go +++ b/vendor/github.com/open-policy-agent/opa/bundle/store.go @@ -6,1031 +6,118 @@ package bundle import ( "context" - "encoding/base64" - "encoding/json" - "fmt" - "path/filepath" - "strings" - "github.com/open-policy-agent/opa/ast" - iCompiler "github.com/open-policy-agent/opa/internal/compiler" - "github.com/open-policy-agent/opa/internal/json/patch" - "github.com/open-policy-agent/opa/metrics" "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/bundle" ) // BundlesBasePath is the storage path used for storing bundle metadata -var BundlesBasePath = storage.MustParsePath("/system/bundles") +var BundlesBasePath = v1.BundlesBasePath // Note: As needed these helpers could be memoized. // ManifestStoragePath is the storage path used for the given named bundle manifest. func ManifestStoragePath(name string) storage.Path { - return append(BundlesBasePath, name, "manifest") + return v1.ManifestStoragePath(name) } // EtagStoragePath is the storage path used for the given named bundle etag. 
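For reference, the storage layout these path helpers produce: bundle metadata lives under /system/bundles/<name>. A tiny sketch; "authz" is a placeholder bundle name:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/bundle"
)

func main() {
	fmt.Println(bundle.ManifestStoragePath("authz")) // /system/bundles/authz/manifest
	fmt.Println(bundle.EtagStoragePath("authz"))     // /system/bundles/authz/etag
}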
func EtagStoragePath(name string) storage.Path { - return append(BundlesBasePath, name, "etag") -} - -func namedBundlePath(name string) storage.Path { - return append(BundlesBasePath, name) -} - -func rootsPath(name string) storage.Path { - return append(BundlesBasePath, name, "manifest", "roots") -} - -func revisionPath(name string) storage.Path { - return append(BundlesBasePath, name, "manifest", "revision") -} - -func wasmModulePath(name string) storage.Path { - return append(BundlesBasePath, name, "wasm") -} - -func wasmEntrypointsPath(name string) storage.Path { - return append(BundlesBasePath, name, "manifest", "wasm") -} - -func metadataPath(name string) storage.Path { - return append(BundlesBasePath, name, "manifest", "metadata") -} - -func read(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (interface{}, error) { - value, err := store.Read(ctx, txn, path) - if err != nil { - return nil, err - } - - if astValue, ok := value.(ast.Value); ok { - value, err = ast.JSON(astValue) - if err != nil { - return nil, err - } - } - - return value, nil + return v1.EtagStoragePath(name) } // ReadBundleNamesFromStore will return a list of bundle names which have had their metadata stored. func ReadBundleNamesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) ([]string, error) { - value, err := read(ctx, store, txn, BundlesBasePath) - if err != nil { - return nil, err - } - - bundleMap, ok := value.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("corrupt manifest roots") - } - - bundles := make([]string, len(bundleMap)) - idx := 0 - for name := range bundleMap { - bundles[idx] = name - idx++ - } - return bundles, nil + return v1.ReadBundleNamesFromStore(ctx, store, txn) } // WriteManifestToStore will write the manifest into the storage. This function is called when // the bundle is activated. func WriteManifestToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string, manifest Manifest) error { - return write(ctx, store, txn, ManifestStoragePath(name), manifest) + return v1.WriteManifestToStore(ctx, store, txn, name, manifest) } // WriteEtagToStore will write the bundle etag into the storage. This function is called when the bundle is activated. func WriteEtagToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name, etag string) error { - return write(ctx, store, txn, EtagStoragePath(name), etag) -} - -func write(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path, value interface{}) error { - if err := util.RoundTrip(&value); err != nil { - return err - } - - var dir []string - if len(path) > 1 { - dir = path[:len(path)-1] - } - - if err := storage.MakeDir(ctx, store, txn, dir); err != nil { - return err - } - - return store.Write(ctx, txn, storage.AddOp, path, value) + return v1.WriteEtagToStore(ctx, store, txn, name, etag) } // EraseManifestFromStore will remove the manifest from storage. This function is called // when the bundle is deactivated. func EraseManifestFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error { - path := namedBundlePath(name) - err := store.Write(ctx, txn, storage.RemoveOp, path, nil) - return suppressNotFound(err) -} - -// eraseBundleEtagFromStore will remove the bundle etag from storage. This function is called -// when the bundle is deactivated. 
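A sketch of the write helpers kept by this shim, called inside an open write transaction; the bundle name, revision, and etag are placeholders:

package bundledemo

import (
	"context"

	"github.com/open-policy-agent/opa/bundle"
	"github.com/open-policy-agent/opa/storage"
)

// persistMetadata records a bundle's manifest and etag, as plugins do when
// a bundle is activated.
func persistMetadata(ctx context.Context, store storage.Store, txn storage.Transaction) error {
	m := bundle.Manifest{Revision: "quickbrownfaux"}
	if err := bundle.WriteManifestToStore(ctx, store, txn, "authz", m); err != nil {
		return err
	}
	return bundle.WriteEtagToStore(ctx, store, txn, "authz", `W/"abc123"`)
}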
-func eraseBundleEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error { - path := EtagStoragePath(name) - err := store.Write(ctx, txn, storage.RemoveOp, path, nil) - return suppressNotFound(err) -} - -func suppressNotFound(err error) error { - if err == nil || storage.IsNotFound(err) { - return nil - } - return err -} - -func writeWasmModulesToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string, b *Bundle) error { - basePath := wasmModulePath(name) - for _, wm := range b.WasmModules { - path := append(basePath, wm.Path) - err := write(ctx, store, txn, path, base64.StdEncoding.EncodeToString(wm.Raw)) - if err != nil { - return err - } - } - return nil -} - -func eraseWasmModulesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error { - path := wasmModulePath(name) - - err := store.Write(ctx, txn, storage.RemoveOp, path, nil) - return suppressNotFound(err) -} - -// ReadWasmMetadataFromStore will read Wasm module resolver metadata from the store. -func ReadWasmMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) ([]WasmResolver, error) { - path := wasmEntrypointsPath(name) - value, err := read(ctx, store, txn, path) - if err != nil { - return nil, err - } - - bs, err := json.Marshal(value) - if err != nil { - return nil, fmt.Errorf("corrupt wasm manifest data") - } - - var wasmMetadata []WasmResolver - - err = util.UnmarshalJSON(bs, &wasmMetadata) - if err != nil { - return nil, fmt.Errorf("corrupt wasm manifest data") - } - - return wasmMetadata, nil + return v1.EraseManifestFromStore(ctx, store, txn, name) } // ReadWasmModulesFromStore will write Wasm module resolver metadata from the store. func ReadWasmModulesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (map[string][]byte, error) { - path := wasmModulePath(name) - value, err := read(ctx, store, txn, path) - if err != nil { - return nil, err - } - - encodedModules, ok := value.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("corrupt wasm modules") - } - - rawModules := map[string][]byte{} - for path, enc := range encodedModules { - encStr, ok := enc.(string) - if !ok { - return nil, fmt.Errorf("corrupt wasm modules") - } - bs, err := base64.StdEncoding.DecodeString(encStr) - if err != nil { - return nil, err - } - rawModules[path] = bs - } - return rawModules, nil + return v1.ReadWasmModulesFromStore(ctx, store, txn, name) } // ReadBundleRootsFromStore returns the roots in the specified bundle. // If the bundle is not activated, this function will return // storage NotFound error. func ReadBundleRootsFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) ([]string, error) { - value, err := read(ctx, store, txn, rootsPath(name)) - if err != nil { - return nil, err - } - - sl, ok := value.([]interface{}) - if !ok { - return nil, fmt.Errorf("corrupt manifest roots") - } - - roots := make([]string, len(sl)) - - for i := range sl { - roots[i], ok = sl[i].(string) - if !ok { - return nil, fmt.Errorf("corrupt manifest root") - } - } - - return roots, nil + return v1.ReadBundleRootsFromStore(ctx, store, txn, name) } // ReadBundleRevisionFromStore returns the revision in the specified bundle. // If the bundle is not activated, this function will return // storage NotFound error. 
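A sketch of the read helpers in this hunk: enumerate activated bundles, then read each revision, inside a read transaction. It uses the in-memory store; a fresh store simply yields no names, and reads against unactivated bundles return a storage NotFound error as documented:

package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/bundle"
	"github.com/open-policy-agent/opa/storage"
	"github.com/open-policy-agent/opa/storage/inmem"
)

func main() {
	ctx := context.Background()
	store := inmem.New()

	txn := storage.NewTransactionOrDie(ctx, store)
	defer store.Abort(ctx, txn)

	names, err := bundle.ReadBundleNamesFromStore(ctx, store, txn)
	if err != nil && !storage.IsNotFound(err) {
		panic(err)
	}
	for _, name := range names {
		rev, err := bundle.ReadBundleRevisionFromStore(ctx, store, txn, name)
		if err != nil {
			continue // revision not set for this bundle
		}
		fmt.Println(name, rev)
	}
}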
func ReadBundleRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (string, error) { - return readRevisionFromStore(ctx, store, txn, revisionPath(name)) -} - -func readRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (string, error) { - value, err := read(ctx, store, txn, path) - if err != nil { - return "", err - } - - str, ok := value.(string) - if !ok { - return "", fmt.Errorf("corrupt manifest revision") - } - - return str, nil + return v1.ReadBundleRevisionFromStore(ctx, store, txn, name) } // ReadBundleMetadataFromStore returns the metadata in the specified bundle. // If the bundle is not activated, this function will return // storage NotFound error. func ReadBundleMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (map[string]interface{}, error) { - return readMetadataFromStore(ctx, store, txn, metadataPath(name)) -} - -func readMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (map[string]interface{}, error) { - value, err := read(ctx, store, txn, path) - if err != nil { - return nil, suppressNotFound(err) - } - - data, ok := value.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("corrupt manifest metadata") - } - - return data, nil + return v1.ReadBundleMetadataFromStore(ctx, store, txn, name) } // ReadBundleEtagFromStore returns the etag for the specified bundle. // If the bundle is not activated, this function will return // storage NotFound error. func ReadBundleEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (string, error) { - return readEtagFromStore(ctx, store, txn, EtagStoragePath(name)) -} - -func readEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (string, error) { - value, err := read(ctx, store, txn, path) - if err != nil { - return "", err - } - - str, ok := value.(string) - if !ok { - return "", fmt.Errorf("corrupt bundle etag") - } - - return str, nil + return v1.ReadBundleEtagFromStore(ctx, store, txn, name) } // ActivateOpts defines options for the Activate API call. -type ActivateOpts struct { - Ctx context.Context - Store storage.Store - Txn storage.Transaction - TxnCtx *storage.Context - Compiler *ast.Compiler - Metrics metrics.Metrics - Bundles map[string]*Bundle // Optional - ExtraModules map[string]*ast.Module // Optional - AuthorizationDecisionRef ast.Ref - ParserOptions ast.ParserOptions - - legacy bool -} +type ActivateOpts = v1.ActivateOpts // Activate the bundle(s) by loading into the given Store. This will load policies, data, and record // the manifest in storage. The compiler provided will have had the polices compiled on it. func Activate(opts *ActivateOpts) error { - opts.legacy = false - return activateBundles(opts) + return v1.Activate(opts) } // DeactivateOpts defines options for the Deactivate API call -type DeactivateOpts struct { - Ctx context.Context - Store storage.Store - Txn storage.Transaction - BundleNames map[string]struct{} - ParserOptions ast.ParserOptions -} +type DeactivateOpts = v1.DeactivateOpts // Deactivate the bundle(s). This will erase associated data, policies, and the manifest entry from the store. 
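A sketch of driving Activate through the shim. The field names come from the ActivateOpts struct replaced above; the bundle itself is assumed to have been read elsewhere (e.g. via bundle.NewReader):

package bundledemo

import (
	"context"

	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/bundle"
	"github.com/open-policy-agent/opa/metrics"
	"github.com/open-policy-agent/opa/storage"
	"github.com/open-policy-agent/opa/storage/inmem"
)

// activate loads one snapshot bundle into a fresh in-memory store within a
// single write transaction.
func activate(ctx context.Context, b *bundle.Bundle) error {
	store := inmem.New()
	return storage.Txn(ctx, store, storage.WriteParams, func(txn storage.Transaction) error {
		return bundle.Activate(&bundle.ActivateOpts{
			Ctx:      ctx,
			Store:    store,
			Txn:      txn,
			Compiler: ast.NewCompiler(),
			Metrics:  metrics.New(),
			Bundles:  map[string]*bundle.Bundle{"authz": b},
		})
	})
}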
func Deactivate(opts *DeactivateOpts) error { - erase := map[string]struct{}{} - for name := range opts.BundleNames { - roots, err := ReadBundleRootsFromStore(opts.Ctx, opts.Store, opts.Txn, name) - if suppressNotFound(err) != nil { - return err - } - for _, root := range roots { - erase[root] = struct{}{} - } - } - _, err := eraseBundles(opts.Ctx, opts.Store, opts.Txn, opts.ParserOptions, opts.BundleNames, erase) - return err -} - -func activateBundles(opts *ActivateOpts) error { - - // Build collections of bundle names, modules, and roots to erase - erase := map[string]struct{}{} - names := map[string]struct{}{} - deltaBundles := map[string]*Bundle{} - snapshotBundles := map[string]*Bundle{} - - for name, b := range opts.Bundles { - if b.Type() == DeltaBundleType { - deltaBundles[name] = b - } else { - snapshotBundles[name] = b - names[name] = struct{}{} - - roots, err := ReadBundleRootsFromStore(opts.Ctx, opts.Store, opts.Txn, name) - if suppressNotFound(err) != nil { - return err - } - for _, root := range roots { - erase[root] = struct{}{} - } - - // Erase data at new roots to prepare for writing the new data - for _, root := range *b.Manifest.Roots { - erase[root] = struct{}{} - } - } - } - - // Before changing anything make sure the roots don't collide with any - // other bundles that already are activated or other bundles being activated. - err := hasRootsOverlap(opts.Ctx, opts.Store, opts.Txn, opts.Bundles) - if err != nil { - return err - } - - if len(deltaBundles) != 0 { - err := activateDeltaBundles(opts, deltaBundles) - if err != nil { - return err - } - } - - // Erase data and policies at new + old roots, and remove the old - // manifests before activating a new snapshot bundle. - remaining, err := eraseBundles(opts.Ctx, opts.Store, opts.Txn, opts.ParserOptions, names, erase) - if err != nil { - return err - } - - // Validate data in bundle does not contain paths outside the bundle's roots. - for _, b := range snapshotBundles { - - if b.lazyLoadingMode { - - for _, item := range b.Raw { - path := filepath.ToSlash(item.Path) - - if filepath.Base(path) == dataFile || filepath.Base(path) == yamlDataFile { - var val map[string]json.RawMessage - err = util.Unmarshal(item.Value, &val) - if err == nil { - err = doDFS(val, filepath.Dir(strings.Trim(path, "/")), *b.Manifest.Roots) - if err != nil { - return err - } - } else { - // Build an object for the value - p := getNormalizedPath(path) - - if len(p) == 0 { - return fmt.Errorf("root value must be object") - } - - // verify valid YAML or JSON value - var x interface{} - err := util.Unmarshal(item.Value, &x) - if err != nil { - return err - } - - value := item.Value - dir := map[string]json.RawMessage{} - for i := len(p) - 1; i > 0; i-- { - dir[p[i]] = value - - bs, err := json.Marshal(dir) - if err != nil { - return err - } - - value = bs - dir = map[string]json.RawMessage{} - } - dir[p[0]] = value - - err = doDFS(dir, filepath.Dir(strings.Trim(path, "/")), *b.Manifest.Roots) - if err != nil { - return err - } - } - } - } - } - } - - // Compile the modules all at once to avoid having to re-do work. 
- remainingAndExtra := make(map[string]*ast.Module) - for name, mod := range remaining { - remainingAndExtra[name] = mod - } - for name, mod := range opts.ExtraModules { - remainingAndExtra[name] = mod - } - - err = compileModules(opts.Compiler, opts.Metrics, snapshotBundles, remainingAndExtra, opts.legacy, opts.AuthorizationDecisionRef) - if err != nil { - return err - } - - if err := writeDataAndModules(opts.Ctx, opts.Store, opts.Txn, opts.TxnCtx, snapshotBundles, opts.legacy); err != nil { - return err - } - - if err := ast.CheckPathConflicts(opts.Compiler, storage.NonEmpty(opts.Ctx, opts.Store, opts.Txn)); len(err) > 0 { - return err - } - - for name, b := range snapshotBundles { - if err := writeManifestToStore(opts, name, b.Manifest); err != nil { - return err - } - - if err := writeEtagToStore(opts, name, b.Etag); err != nil { - return err - } - - if err := writeWasmModulesToStore(opts.Ctx, opts.Store, opts.Txn, name, b); err != nil { - return err - } - } - - return nil -} - -func doDFS(obj map[string]json.RawMessage, path string, roots []string) error { - if len(roots) == 1 && roots[0] == "" { - return nil - } - - for key := range obj { - - newPath := filepath.Join(strings.Trim(path, "/"), key) - - // Note: filepath.Join can return paths with '\' separators, always use - // filepath.ToSlash to keep them normalized. - newPath = strings.TrimLeft(normalizePath(newPath), "/.") - - contains := false - prefix := false - if RootPathsContain(roots, newPath) { - contains = true - } else { - for i := range roots { - if strings.HasPrefix(strings.Trim(roots[i], "/"), newPath) { - prefix = true - break - } - } - } - - if !contains && !prefix { - return fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, newPath) - } - - if contains { - continue - } - - var next map[string]json.RawMessage - err := util.Unmarshal(obj[key], &next) - if err != nil { - return fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, newPath) - } - - if err := doDFS(next, newPath, roots); err != nil { - return err - } - } - return nil -} - -func activateDeltaBundles(opts *ActivateOpts, bundles map[string]*Bundle) error { - - // Check that the manifest roots and wasm resolvers in the delta bundle - // match with those currently in the store - for name, b := range bundles { - value, err := opts.Store.Read(opts.Ctx, opts.Txn, ManifestStoragePath(name)) - if err != nil { - if storage.IsNotFound(err) { - continue - } - return err - } - - manifest, err := valueToManifest(value) - if err != nil { - return fmt.Errorf("corrupt manifest data: %w", err) - } - - if !b.Manifest.equalWasmResolversAndRoots(manifest) { - return fmt.Errorf("delta bundle '%s' has wasm resolvers or manifest roots that are different from those in the store", name) - } - } - - for _, b := range bundles { - err := applyPatches(opts.Ctx, opts.Store, opts.Txn, b.Patch.Data) - if err != nil { - return err - } - } - - if err := ast.CheckPathConflicts(opts.Compiler, storage.NonEmpty(opts.Ctx, opts.Store, opts.Txn)); len(err) > 0 { - return err - } - - for name, b := range bundles { - if err := writeManifestToStore(opts, name, b.Manifest); err != nil { - return err - } - - if err := writeEtagToStore(opts, name, b.Etag); err != nil { - return err - } - } - - return nil -} - -func valueToManifest(v interface{}) (Manifest, error) { - if astV, ok := v.(ast.Value); ok { - var err error - v, err = ast.JSON(astV) - if err != nil { - return Manifest{}, 
err - } - } - - var manifest Manifest - - bs, err := json.Marshal(v) - if err != nil { - return Manifest{}, err - } - - err = util.UnmarshalJSON(bs, &manifest) - if err != nil { - return Manifest{}, err - } - - return manifest, nil -} - -// erase bundles by name and roots. This will clear all policies and data at its roots and remove its -// manifest from storage. -func eraseBundles(ctx context.Context, store storage.Store, txn storage.Transaction, parserOpts ast.ParserOptions, names map[string]struct{}, roots map[string]struct{}) (map[string]*ast.Module, error) { - - if err := eraseData(ctx, store, txn, roots); err != nil { - return nil, err - } - - remaining, err := erasePolicies(ctx, store, txn, parserOpts, roots) - if err != nil { - return nil, err - } - - for name := range names { - if err := EraseManifestFromStore(ctx, store, txn, name); suppressNotFound(err) != nil { - return nil, err - } - - if err := LegacyEraseManifestFromStore(ctx, store, txn); suppressNotFound(err) != nil { - return nil, err - } - - if err := eraseBundleEtagFromStore(ctx, store, txn, name); suppressNotFound(err) != nil { - return nil, err - } - - if err := eraseWasmModulesFromStore(ctx, store, txn, name); suppressNotFound(err) != nil { - return nil, err - } - } - - return remaining, nil -} - -func eraseData(ctx context.Context, store storage.Store, txn storage.Transaction, roots map[string]struct{}) error { - for root := range roots { - path, ok := storage.ParsePathEscaped("/" + root) - if !ok { - return fmt.Errorf("manifest root path invalid: %v", root) - } - - if len(path) > 0 { - if err := store.Write(ctx, txn, storage.RemoveOp, path, nil); suppressNotFound(err) != nil { - return err - } - } - } - return nil -} - -func erasePolicies(ctx context.Context, store storage.Store, txn storage.Transaction, parserOpts ast.ParserOptions, roots map[string]struct{}) (map[string]*ast.Module, error) { - - ids, err := store.ListPolicies(ctx, txn) - if err != nil { - return nil, err - } - - remaining := map[string]*ast.Module{} - - for _, id := range ids { - bs, err := store.GetPolicy(ctx, txn, id) - if err != nil { - return nil, err - } - module, err := ast.ParseModuleWithOpts(id, string(bs), parserOpts) - if err != nil { - return nil, err - } - path, err := module.Package.Path.Ptr() - if err != nil { - return nil, err - } - deleted := false - for root := range roots { - if RootPathsContain([]string{root}, path) { - if err := store.DeletePolicy(ctx, txn, id); err != nil { - return nil, err - } - deleted = true - break - } - } - if !deleted { - remaining[id] = module - } - } - - return remaining, nil -} - -func writeManifestToStore(opts *ActivateOpts, name string, manifest Manifest) error { - // Always write manifests to the named location. If the plugin is in the older style config - // then also write to the old legacy unnamed location. 
- if err := WriteManifestToStore(opts.Ctx, opts.Store, opts.Txn, name, manifest); err != nil { - return err - } - - if opts.legacy { - if err := LegacyWriteManifestToStore(opts.Ctx, opts.Store, opts.Txn, manifest); err != nil { - return err - } - } - - return nil -} - -func writeEtagToStore(opts *ActivateOpts, name, etag string) error { - if err := WriteEtagToStore(opts.Ctx, opts.Store, opts.Txn, name, etag); err != nil { - return err - } - - return nil -} - -func writeDataAndModules(ctx context.Context, store storage.Store, txn storage.Transaction, txnCtx *storage.Context, bundles map[string]*Bundle, legacy bool) error { - params := storage.WriteParams - params.Context = txnCtx - - for name, b := range bundles { - if len(b.Raw) == 0 { - // Write data from each new bundle into the store. Only write under the - // roots contained in their manifest. - if err := writeData(ctx, store, txn, *b.Manifest.Roots, b.Data); err != nil { - return err - } - - for _, mf := range b.Modules { - var path string - - // For backwards compatibility, in legacy mode, upsert policies to - // the unprefixed path. - if legacy { - path = mf.Path - } else { - path = modulePathWithPrefix(name, mf.Path) - } - - if err := store.UpsertPolicy(ctx, txn, path, mf.Raw); err != nil { - return err - } - } - } else { - params.BasePaths = *b.Manifest.Roots - - err := store.Truncate(ctx, txn, params, NewIterator(b.Raw)) - if err != nil { - return fmt.Errorf("store truncate failed for bundle '%s': %v", name, err) - } - } - } - - return nil -} - -func writeData(ctx context.Context, store storage.Store, txn storage.Transaction, roots []string, data map[string]interface{}) error { - for _, root := range roots { - path, ok := storage.ParsePathEscaped("/" + root) - if !ok { - return fmt.Errorf("manifest root path invalid: %v", root) - } - if value, ok := lookup(path, data); ok { - if len(path) > 0 { - if err := storage.MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil { - return err - } - } - if err := store.Write(ctx, txn, storage.AddOp, path, value); err != nil { - return err - } - } - } - return nil -} - -func compileModules(compiler *ast.Compiler, m metrics.Metrics, bundles map[string]*Bundle, extraModules map[string]*ast.Module, legacy bool, authorizationDecisionRef ast.Ref) error { - - m.Timer(metrics.RegoModuleCompile).Start() - defer m.Timer(metrics.RegoModuleCompile).Stop() - - modules := map[string]*ast.Module{} - - // preserve any modules already on the compiler - for name, module := range compiler.Modules { - modules[name] = module - } - - // preserve any modules passed in from the store - for name, module := range extraModules { - modules[name] = module - } - - // include all the new bundle modules - for bundleName, b := range bundles { - if legacy { - for _, mf := range b.Modules { - modules[mf.Path] = mf.Parsed - } - } else { - for name, module := range b.ParsedModules(bundleName) { - modules[name] = module - } - } - } - - if compiler.Compile(modules); compiler.Failed() { - return compiler.Errors - } - - if authorizationDecisionRef.Equal(ast.EmptyRef()) { - return nil - } - - return iCompiler.VerifyAuthorizationPolicySchema(compiler, authorizationDecisionRef) + return v1.Deactivate(opts) } -func writeModules(ctx context.Context, store storage.Store, txn storage.Transaction, compiler *ast.Compiler, m metrics.Metrics, bundles map[string]*Bundle, extraModules map[string]*ast.Module, legacy bool) error { - - m.Timer(metrics.RegoModuleCompile).Start() - defer m.Timer(metrics.RegoModuleCompile).Stop() - - modules := 
map[string]*ast.Module{} - - // preserve any modules already on the compiler - for name, module := range compiler.Modules { - modules[name] = module - } - - // preserve any modules passed in from the store - for name, module := range extraModules { - modules[name] = module - } - - // include all the new bundle modules - for bundleName, b := range bundles { - if legacy { - for _, mf := range b.Modules { - modules[mf.Path] = mf.Parsed - } - } else { - for name, module := range b.ParsedModules(bundleName) { - modules[name] = module - } - } - } - - if compiler.Compile(modules); compiler.Failed() { - return compiler.Errors - } - for bundleName, b := range bundles { - for _, mf := range b.Modules { - var path string - - // For backwards compatibility, in legacy mode, upsert policies to - // the unprefixed path. - if legacy { - path = mf.Path - } else { - path = modulePathWithPrefix(bundleName, mf.Path) - } - - if err := store.UpsertPolicy(ctx, txn, path, mf.Raw); err != nil { - return err - } - } - } - return nil -} - -func lookup(path storage.Path, data map[string]interface{}) (interface{}, bool) { - if len(path) == 0 { - return data, true - } - for i := 0; i < len(path)-1; i++ { - value, ok := data[path[i]] - if !ok { - return nil, false - } - obj, ok := value.(map[string]interface{}) - if !ok { - return nil, false - } - data = obj - } - value, ok := data[path[len(path)-1]] - return value, ok -} - -func hasRootsOverlap(ctx context.Context, store storage.Store, txn storage.Transaction, bundles map[string]*Bundle) error { - collisions := map[string][]string{} - allBundles, err := ReadBundleNamesFromStore(ctx, store, txn) - if suppressNotFound(err) != nil { - return err - } - - allRoots := map[string][]string{} - - // Build a map of roots for existing bundles already in the system - for _, name := range allBundles { - roots, err := ReadBundleRootsFromStore(ctx, store, txn, name) - if suppressNotFound(err) != nil { - return err - } - allRoots[name] = roots - } - - // Add in any bundles that are being activated, overwrite existing roots - // with new ones where bundles are in both groups. 
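The collision rule applied in the loop below is the exported RootPathsOverlap predicate: two roots conflict when one is a segment-wise prefix of the other. A sketch, assuming the shim still exports it as the v0 package did:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/bundle"
)

func main() {
	fmt.Println(bundle.RootPathsOverlap("a/b", "a/b/c")) // true: nested paths
	fmt.Println(bundle.RootPathsOverlap("a/b", "a/bc"))  // false: siblings, not prefixes
}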
- for name, bundle := range bundles { - allRoots[name] = *bundle.Manifest.Roots - } - - // Now check for each new bundle if it conflicts with any of the others - for name, bundle := range bundles { - for otherBundle, otherRoots := range allRoots { - if name == otherBundle { - // Skip the current bundle being checked - continue - } - - // Compare the "new" roots with other existing (or a different bundles new roots) - for _, newRoot := range *bundle.Manifest.Roots { - for _, otherRoot := range otherRoots { - if RootPathsOverlap(newRoot, otherRoot) { - collisions[otherBundle] = append(collisions[otherBundle], newRoot) - } - } - } - } - } - - if len(collisions) > 0 { - var bundleNames []string - for name := range collisions { - bundleNames = append(bundleNames, name) - } - return fmt.Errorf("detected overlapping roots in bundle manifest with: %s", bundleNames) - } - return nil -} - -func applyPatches(ctx context.Context, store storage.Store, txn storage.Transaction, patches []PatchOperation) error { - for _, pat := range patches { - - // construct patch path - path, ok := patch.ParsePatchPathEscaped("/" + strings.Trim(pat.Path, "/")) - if !ok { - return fmt.Errorf("error parsing patch path") - } - - var op storage.PatchOp - switch pat.Op { - case "upsert": - op = storage.AddOp - - _, err := store.Read(ctx, txn, path[:len(path)-1]) - if err != nil { - if !storage.IsNotFound(err) { - return err - } - - if err := storage.MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil { - return err - } - } - case "remove": - op = storage.RemoveOp - case "replace": - op = storage.ReplaceOp - default: - return fmt.Errorf("bad patch operation: %v", pat.Op) - } - - // apply the patch - if err := store.Write(ctx, txn, op, path, pat.Value); err != nil { - return err - } - } - - return nil -} - -// Helpers for the older single (unnamed) bundle style manifest storage. - -// LegacyManifestStoragePath is the older unnamed bundle path for manifests to be stored. -// Deprecated: Use ManifestStoragePath and named bundles instead. -var legacyManifestStoragePath = storage.MustParsePath("/system/bundle/manifest") -var legacyRevisionStoragePath = append(legacyManifestStoragePath, "revision") - // LegacyWriteManifestToStore will write the bundle manifest to the older single (unnamed) bundle manifest location. // Deprecated: Use WriteManifestToStore and named bundles instead. func LegacyWriteManifestToStore(ctx context.Context, store storage.Store, txn storage.Transaction, manifest Manifest) error { - return write(ctx, store, txn, legacyManifestStoragePath, manifest) + return v1.LegacyWriteManifestToStore(ctx, store, txn, manifest) } // LegacyEraseManifestFromStore will erase the bundle manifest from the older single (unnamed) bundle manifest location. // Deprecated: Use WriteManifestToStore and named bundles instead. func LegacyEraseManifestFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) error { - err := store.Write(ctx, txn, storage.RemoveOp, legacyManifestStoragePath, nil) - if err != nil { - return err - } - return nil + return v1.LegacyEraseManifestFromStore(ctx, store, txn) } // LegacyReadRevisionFromStore will read the bundle manifest revision from the older single (unnamed) bundle manifest location. // Deprecated: Use ReadBundleRevisionFromStore and named bundles instead. 
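For delta bundles, the applyPatches code replaced above maps patch ops onto storage ops: "upsert" becomes storage.AddOp (creating missing parent paths), "remove" becomes storage.RemoveOp, and "replace" becomes storage.ReplaceOp. A sketch of the input shape, built against v1/bundle directly; the paths and values are placeholders:

package bundledemo

import (
	v1 "github.com/open-policy-agent/opa/v1/bundle"
)

// deltaBundle returns a bundle carrying only data patches; activating it
// mutates existing data in place instead of replacing it.
func deltaBundle() *v1.Bundle {
	return &v1.Bundle{
		Patch: v1.Patch{Data: []v1.PatchOperation{
			{Op: "upsert", Path: "/users/alice/roles", Value: []string{"admin"}},
			{Op: "remove", Path: "/users/bob"},
		}},
	}
}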
func LegacyReadRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) (string, error) { - return readRevisionFromStore(ctx, store, txn, legacyRevisionStoragePath) + return v1.LegacyReadRevisionFromStore(ctx, store, txn) } // ActivateLegacy calls Activate for the bundles but will also write their manifest to the older unnamed store location. // Deprecated: Use Activate with named bundles instead. func ActivateLegacy(opts *ActivateOpts) error { - opts.legacy = true - return activateBundles(opts) + return v1.ActivateLegacy(opts) } diff --git a/vendor/github.com/open-policy-agent/opa/bundle/verify.go b/vendor/github.com/open-policy-agent/opa/bundle/verify.go index e85be835be9..ef2e1e32db0 100644 --- a/vendor/github.com/open-policy-agent/opa/bundle/verify.go +++ b/vendor/github.com/open-policy-agent/opa/bundle/verify.go @@ -6,26 +6,11 @@ package bundle import ( - "bytes" - "encoding/base64" - "encoding/hex" - "encoding/json" - "fmt" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" - "github.com/open-policy-agent/opa/internal/jwx/jws" - "github.com/open-policy-agent/opa/internal/jwx/jws/verify" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/bundle" ) -const defaultVerifierID = "_default" - -var verifiers map[string]Verifier - // Verifier is the interface expected for implementations that verify bundle signatures. -type Verifier interface { - VerifyBundleSignature(SignaturesConfig, *VerificationConfig) (map[string]FileInfo, error) -} +type Verifier v1.Verifier // VerifyBundleSignature will retrieve the Verifier implementation based // on the Plugin specified in SignaturesConfig, and call its implementation @@ -33,199 +18,19 @@ type Verifier interface { // using the given public keys or secret. If a signature is verified, it keeps // track of the files specified in the JWT payload func VerifyBundleSignature(sc SignaturesConfig, bvc *VerificationConfig) (map[string]FileInfo, error) { - // default implementation does not return a nil for map, so don't - // do it here either - files := make(map[string]FileInfo) - var plugin string - // for backwards compatibility, check if there is no plugin specified, and use default - if sc.Plugin == "" { - plugin = defaultVerifierID - } else { - plugin = sc.Plugin - } - verifier, err := GetVerifier(plugin) - if err != nil { - return files, err - } - return verifier.VerifyBundleSignature(sc, bvc) + return v1.VerifyBundleSignature(sc, bvc) } // DefaultVerifier is the default bundle verification implementation. It verifies bundles by checking // the JWT signature using a locally-accessible public key. -type DefaultVerifier struct{} - -// VerifyBundleSignature verifies the bundle signature using the given public keys or secret. 
-// If a signature is verified, it keeps track of the files specified in the JWT payload -func (*DefaultVerifier) VerifyBundleSignature(sc SignaturesConfig, bvc *VerificationConfig) (map[string]FileInfo, error) { - files := make(map[string]FileInfo) - - if len(sc.Signatures) == 0 { - return files, fmt.Errorf(".signatures.json: missing JWT (expected exactly one)") - } - - if len(sc.Signatures) > 1 { - return files, fmt.Errorf(".signatures.json: multiple JWTs not supported (expected exactly one)") - } - - for _, token := range sc.Signatures { - payload, err := verifyJWTSignature(token, bvc) - if err != nil { - return files, err - } - - for _, file := range payload.Files { - files[file.Name] = file - } - } - return files, nil -} - -func verifyJWTSignature(token string, bvc *VerificationConfig) (*DecodedSignature, error) { - // decode JWT to check if the header specifies the key to use and/or if claims have the scope. - - parts, err := jws.SplitCompact(token) - if err != nil { - return nil, err - } - - var decodedHeader []byte - if decodedHeader, err = base64.RawURLEncoding.DecodeString(parts[0]); err != nil { - return nil, fmt.Errorf("failed to base64 decode JWT headers: %w", err) - } - - var hdr jws.StandardHeaders - if err := json.Unmarshal(decodedHeader, &hdr); err != nil { - return nil, fmt.Errorf("failed to parse JWT headers: %w", err) - } - - payload, err := base64.RawURLEncoding.DecodeString(parts[1]) - if err != nil { - return nil, err - } - - var ds DecodedSignature - if err := json.Unmarshal(payload, &ds); err != nil { - return nil, err - } - - // check for the id of the key to use for JWT signature verification - // first in the OPA config. If not found, then check the JWT kid. - keyID := bvc.KeyID - if keyID == "" { - keyID = hdr.KeyID - } - if keyID == "" { - // If header has no key id, check the deprecated key claim. - keyID = ds.KeyID - } - - if keyID == "" { - return nil, fmt.Errorf("verification key ID is empty") - } - - // now that we have the keyID, fetch the actual key - keyConfig, err := bvc.GetPublicKey(keyID) - if err != nil { - return nil, err - } - - // verify JWT signature - alg := jwa.SignatureAlgorithm(keyConfig.Algorithm) - key, err := verify.GetSigningKey(keyConfig.Key, alg) - if err != nil { - return nil, err - } - - _, err = jws.Verify([]byte(token), alg, key) - if err != nil { - return nil, err - } - - // verify the scope - scope := bvc.Scope - if scope == "" { - scope = keyConfig.Scope - } - - if ds.Scope != scope { - return nil, fmt.Errorf("scope mismatch") - } - return &ds, nil -} - -// VerifyBundleFile verifies the hash of a file in the bundle matches to that provided in the bundle's signature -func VerifyBundleFile(path string, data bytes.Buffer, files map[string]FileInfo) error { - var file FileInfo - var ok bool - - if file, ok = files[path]; !ok { - return fmt.Errorf("file %v not included in bundle signature", path) - } - - if file.Algorithm == "" { - return fmt.Errorf("no hashing algorithm provided for file %v", path) - } - - hash, err := NewSignatureHasher(HashingAlgorithm(file.Algorithm)) - if err != nil { - return err - } - - // hash the file content - // For unstructured files, hash the byte stream of the file - // For structured files, read the byte stream and parse into a JSON structure; - // then recursively order the fields of all objects alphabetically and then apply - // the hash function to result to compute the hash. This ensures that the digital signature is - // independent of whitespace and other non-semantic JSON features. 
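A sketch of checking a single file against the digests recovered from the signature, using the v1 entry point the removed code moved to; the files map would come from a prior VerifyBundleSignature call:

package bundledemo

import (
	"bytes"
	"fmt"

	v1 "github.com/open-policy-agent/opa/v1/bundle"
)

// checkFile verifies one bundle file's digest. Structured files (.json,
// .yaml) are canonicalized before hashing, so formatting differences in
// the on-disk file do not break verification.
func checkFile(path string, data []byte, files map[string]v1.FileInfo) error {
	var buf bytes.Buffer
	buf.Write(data)
	if err := v1.VerifyBundleFile(path, buf, files); err != nil {
		return fmt.Errorf("signature check failed for %s: %w", path, err)
	}
	return nil
}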
- var value interface{} - if IsStructuredDoc(path) { - err := util.Unmarshal(data.Bytes(), &value) - if err != nil { - return err - } - } else { - value = data.Bytes() - } - - bs, err := hash.HashFile(value) - if err != nil { - return err - } - - // compare file hash with same file in the JWT payloads - fb, err := hex.DecodeString(file.Hash) - if err != nil { - return err - } - - if !bytes.Equal(fb, bs) { - return fmt.Errorf("%v: digest mismatch (want: %x, got: %x)", path, fb, bs) - } - - delete(files, path) - return nil -} +type DefaultVerifier = v1.DefaultVerifier // GetVerifier returns the Verifier registered under the given id func GetVerifier(id string) (Verifier, error) { - verifier, ok := verifiers[id] - if !ok { - return nil, fmt.Errorf("no verifier exists under id %s", id) - } - return verifier, nil + return v1.GetVerifier(id) } // RegisterVerifier registers a Verifier under the given id func RegisterVerifier(id string, v Verifier) error { - if id == defaultVerifierID { - return fmt.Errorf("verifier id %s is reserved, use a different id", id) - } - verifiers[id] = v - return nil -} - -func init() { - verifiers = map[string]Verifier{ - defaultVerifierID: &DefaultVerifier{}, - } + return v1.RegisterVerifier(id, v) } diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/doc.go b/vendor/github.com/open-policy-agent/opa/capabilities/doc.go new file mode 100644 index 00000000000..189c2e727ac --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/doc.go @@ -0,0 +1,8 @@ +// Copyright 2024 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. 
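The JSON files added below are capabilities snapshots: one declaration per builtin, giving its name, argument and result types, and optional infix and nondeterministic flags. A sketch of loading the capabilities for the running OPA version via the v1 ast package:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	caps := ast.CapabilitiesForThisVersion()
	fmt.Println(len(caps.Builtins), "builtins") // abs, all, and, any, ...
}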
+package capabilities diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.0.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.0.0.json new file mode 100644 index 00000000000..48a87b0c35d --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.0.0.json @@ -0,0 +1,4835 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + 
"type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + 
"type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + 
"decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": 
"object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], 
+ "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": 
"function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": 
"desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + 
"decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": 
"numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + 
"type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + 
"decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": 
"strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + 
"type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, 
+ "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.0.1.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.0.1.json new file mode 100644 index 00000000000..48a87b0c35d --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.0.1.json @@ -0,0 +1,4835 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": 
"function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { 
+ "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + 
"type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": 
"object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + 
"type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": 
"function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + 
"dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + 
"type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } 
+ ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + 
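The `net.cidr_*` declarations above are mostly string-in, boolean- or set-out. A one-rule sketch (package and rule names illustrative), again reusing the earlier harness:

```go
// net.cidr_contains(cidr, ip) -> boolean, per the declaration above.
const cidrModule = `package netpolicy

internal if net.cidr_contains("10.0.0.0/8", input.ip)
`
```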
"name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": 
"regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + 
"dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + 
"name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": 
"trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rego_v1" + ] +} diff --git 
a/vendor/github.com/open-policy-agent/opa/capabilities/v1.1.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.1.0.json new file mode 100644 index 00000000000..48a87b0c35d --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.1.0.json @@ -0,0 +1,4835 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": 
"function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { 
+ "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": 
"any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, 
+ { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" 
+ }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": 
"io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": 
"string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + 
"type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": 
{ + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": 
"|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": 
"string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ 
+ { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": 
"time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + 
"name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.2.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.2.0.json new file mode 100644 index 00000000000..1253c88b307 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.2.0.json @@ -0,0 +1,4849 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": 
"array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" 
+ } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": 
"function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + 
"dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { 
+ "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": 
"string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + 
"dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + 
}, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + 
"type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + 
}, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": 
{ + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + 
"name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + 
{ + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + 
"type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + 
"minor_version": 2 + } + ], + "features": [ + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.3.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.3.0.json new file mode 100644 index 00000000000..1253c88b307 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.3.0.json @@ -0,0 +1,4849 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + 
"type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + 
], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + 
"infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + 
"value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, 
+ "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + 
"type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" 
+ }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + 
"result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + 
"decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": 
{ + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": 
"array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": 
"strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": 
"function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + 
}, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.0.json new file mode 100644 index 00000000000..1253c88b307 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.0.json @@ -0,0 +1,4849 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": 
"array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + 
} + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ 
+ { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" 
+ }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + 
}, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": 
"any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + 
}, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + 
"result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": 
"string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + 
"of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": 
"function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" 
+ }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + 
"of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + 
"args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + 
"decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.1.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.1.json new file mode 100644 index 00000000000..1253c88b307 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.1.json @@ -0,0 +1,4849 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + 
{ + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + 
"type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": 
"boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": 
"array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + 
"of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": 
"boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { 
+ "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { 
+ "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + 
"args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": 
{ + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + 
"type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" 
+ }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": 
"array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": 
"urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.2.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.2.json new file mode 100644 index 00000000000..1253c88b307 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.2.json @@ -0,0 +1,4849 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, 
+ "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": 
"object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": 
"crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + 
}, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + 
}, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, 
+ "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { 
+ "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + 
"type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + 
}, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": 
{ + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + 
"type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": 
{ + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + 
"type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": 
"function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + 
"type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go b/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go index 064649733a4..836aa586b93 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go +++ b/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go @@ -6,15 +6,16 @@ package bundle import ( "context" + "errors" "fmt" "io" "os" "path/filepath" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/bundle" - "github.com/open-policy-agent/opa/resolver/wasm" - "github.com/open-policy-agent/opa/storage" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/bundle" + "github.com/open-policy-agent/opa/v1/resolver/wasm" + "github.com/open-policy-agent/opa/v1/storage" ) // LoadWasmResolversFromStore will lookup all Wasm modules from the store along with the @@ -97,7 +98,7 @@ func LoadBundleFromDiskForRegoVersion(regoVersion ast.RegoVersion, path, name st _, err := os.Stat(bundlePath) if err == nil { - f, err := os.Open(filepath.Join(bundlePath)) + f, err := os.Open(bundlePath) if err != nil { return nil, err } @@ -132,7 +133,7 @@ func SaveBundleToDisk(path string, raw io.Reader) (string, error) { } if raw == nil { - return "", fmt.Errorf("no raw bundle bytes to persist to disk") + return "", errors.New("no raw bundle bytes to persist to disk") } dest, err := os.CreateTemp(path, ".bundle.tar.gz.*.tmp") diff --git a/vendor/github.com/open-policy-agent/opa/internal/cidr/merge/merge.go b/vendor/github.com/open-policy-agent/opa/internal/cidr/merge/merge.go index a019cde1283..c2392b67754 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/cidr/merge/merge.go +++ b/vendor/github.com/open-policy-agent/opa/internal/cidr/merge/merge.go @@ -114,7 +114,7 @@ func GetAddressRange(ipNet net.IPNet) (net.IP, net.IP) { copy(lastIPMask, ipNet.Mask) for i := range lastIPMask { lastIPMask[len(lastIPMask)-i-1] = ^lastIPMask[len(lastIPMask)-i-1] - lastIP[net.IPv6len-i-1] = lastIP[net.IPv6len-i-1] | lastIPMask[len(lastIPMask)-i-1] + lastIP[net.IPv6len-i-1] |= lastIPMask[len(lastIPMask)-i-1] } return firstIP, lastIP diff --git a/vendor/github.com/open-policy-agent/opa/internal/compiler/utils.go b/vendor/github.com/open-policy-agent/opa/internal/compiler/utils.go index 4d80aeeef98..5d2e778b139 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/compiler/utils.go +++ b/vendor/github.com/open-policy-agent/opa/internal/compiler/utils.go @@ -5,9 +5,12 @@ package compiler import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/schemas" - "github.com/open-policy-agent/opa/util" + "errors" + "sync" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/schemas" + "github.com/open-policy-agent/opa/v1/util" ) type SchemaFile string @@ -16,12 +19,35 @@ const ( AuthorizationPolicySchema SchemaFile = "authorizationPolicy.json" ) -var schemaDefinitions = map[SchemaFile]interface{}{} +var schemaDefinitions = map[SchemaFile]any{} 
+ +var loadOnce = sync.OnceValue(func() error { + cont, err := schemas.FS.ReadFile(string(AuthorizationPolicySchema)) + if err != nil { + return err + } + + if len(cont) == 0 { + return errors.New("expected authorization policy schema file to be present") + } + + var schema any + if err := util.Unmarshal(cont, &schema); err != nil { + return err + } + + schemaDefinitions[AuthorizationPolicySchema] = schema + + return nil +}) // VerifyAuthorizationPolicySchema performs type checking on rules against the schema for the Authorization Policy // Input document. // NOTE: The provided compiler should have already run the compilation process on the input modules func VerifyAuthorizationPolicySchema(compiler *ast.Compiler, ref ast.Ref) error { + if err := loadOnce(); err != nil { + panic(err) + } rules := getRulesWithDependencies(compiler, ref) @@ -32,7 +58,10 @@ func VerifyAuthorizationPolicySchema(compiler *ast.Compiler, ref ast.Ref) error schemaSet := ast.NewSchemaSet() schemaSet.Put(ast.SchemaRootRef, schemaDefinitions[AuthorizationPolicySchema]) - errs := ast.NewCompiler().WithSchemas(schemaSet).PassesTypeCheckRules(rules) + errs := ast.NewCompiler(). + WithDefaultRegoVersion(compiler.DefaultRegoVersion()). + WithSchemas(schemaSet). + PassesTypeCheckRules(rules) if len(errs) > 0 { return errs @@ -64,26 +93,3 @@ func transitiveDependencies(compiler *ast.Compiler, rule *ast.Rule, deps map[*as transitiveDependencies(compiler, other, deps) } } - -func loadAuthorizationPolicySchema() { - - cont, err := schemas.FS.ReadFile(string(AuthorizationPolicySchema)) - if err != nil { - panic(err) - } - - if len(cont) == 0 { - panic("expected authorization policy schema file to be present") - } - - var schema interface{} - if err := util.Unmarshal(cont, &schema); err != nil { - panic(err) - } - - schemaDefinitions[AuthorizationPolicySchema] = schema -} - -func init() { - loadAuthorizationPolicySchema() -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go index 9a5cebec542..25cbc13b471 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go +++ b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go @@ -12,7 +12,6 @@ import ( "fmt" "io" - "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/compiler/wasm/opa" "github.com/open-policy-agent/opa/internal/debug" "github.com/open-policy-agent/opa/internal/wasm/encoding" @@ -20,8 +19,9 @@ import ( "github.com/open-policy-agent/opa/internal/wasm/module" "github.com/open-policy-agent/opa/internal/wasm/types" "github.com/open-policy-agent/opa/internal/wasm/util" - "github.com/open-policy-agent/opa/ir" - opatypes "github.com/open-policy-agent/opa/types" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/ir" + opatypes "github.com/open-policy-agent/opa/v1/types" ) // Record Wasm ABI version in exported global variable @@ -340,7 +340,7 @@ func (c *Compiler) initModule() error { // two times. But let's deal with that when it happens. 
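The compiler/utils.go hunk above replaces an init()-time loader that panicked on failure with lazy, error-returning initialization via sync.OnceValue (Go 1.21+). A minimal sketch of that pattern, with a made-up loader standing in for the schema read:

    package main

    import (
        "errors"
        "fmt"
        "sync"
    )

    // loadConfig runs its body at most once; every later call returns the
    // cached result. The byte slice is a stand-in for schemas.FS.ReadFile.
    var loadConfig = sync.OnceValue(func() error {
        data := []byte("schema-bytes") // stand-in for the embedded schema file
        if len(data) == 0 {
            return errors.New("expected config to be present")
        }
        return nil
    })

    func main() {
        for i := 0; i < 2; i++ {
            fmt.Println(loadConfig()) // body executes only on the first call
        }
    }

The upshot of the change: a missing or corrupt schema no longer crashes the process at package import time, and callers that never verify a policy never pay for the load.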
if _, ok := c.funcs[name]; ok { // already seen c.debug.Printf("function name duplicate: %s (%d)", name, fn.Index) - name = name + ".1" + name += ".1" } c.funcs[name] = fn.Index } @@ -348,7 +348,7 @@ func (c *Compiler) initModule() error { for _, fn := range c.policy.Funcs.Funcs { params := make([]types.ValueType, len(fn.Params)) - for i := 0; i < len(params); i++ { + for i := range params { params[i] = types.I32 } @@ -827,7 +827,7 @@ func (c *Compiler) compileFunc(fn *ir.Func) error { memoize := len(fn.Params) == 2 if len(fn.Params) == 0 { - return fmt.Errorf("illegal function: zero args") + return errors.New("illegal function: zero args") } c.nextLocal = 0 @@ -996,12 +996,16 @@ func (c *Compiler) compileBlock(block *ir.Block) ([]instruction.Instruction, err for _, stmt := range block.Stmts { switch stmt := stmt.(type) { case *ir.ResultSetAddStmt: - instrs = append(instrs, instruction.GetLocal{Index: c.lrs}) - instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Value)}) - instrs = append(instrs, instruction.Call{Index: c.function(opaSetAdd)}) + instrs = append(instrs, + instruction.GetLocal{Index: c.lrs}, + instruction.GetLocal{Index: c.local(stmt.Value)}, + instruction.Call{Index: c.function(opaSetAdd)}, + ) case *ir.ReturnLocalStmt: - instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Source)}) - instrs = append(instrs, instruction.Return{}) + instrs = append(instrs, + instruction.GetLocal{Index: c.local(stmt.Source)}, + instruction.Return{}, + ) case *ir.BlockStmt: for i := range stmt.Blocks { block, err := c.compileBlock(stmt.Blocks[i]) @@ -1029,8 +1033,10 @@ func (c *Compiler) compileBlock(block *ir.Block) ([]instruction.Instruction, err return instrs, err } case *ir.AssignVarStmt: - instrs = append(instrs, c.instrRead(stmt.Source)) - instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)}) + instrs = append(instrs, + c.instrRead(stmt.Source), + instruction.SetLocal{Index: c.local(stmt.Target)}, + ) case *ir.AssignVarOnceStmt: instrs = append(instrs, instruction.Block{ Instrs: []instruction.Instruction{ @@ -1360,7 +1366,7 @@ func (c *Compiler) compileUpsert(local ir.Local, path []int, value ir.Operand, _ // Initialize the locals that specify the path of the upsert operation. lpath := make(map[int]uint32, len(path)) - for i := 0; i < len(path); i++ { + for i := range path { lpath[i] = c.genLocal() instrs = append(instrs, instruction.I32Const{Value: c.opaStringAddr(path[i])}) instrs = append(instrs, instruction.SetLocal{Index: lpath[i]}) @@ -1369,10 +1375,10 @@ func (c *Compiler) compileUpsert(local ir.Local, path []int, value ir.Operand, _ // Generate a block that traverses the path of the upsert operation, // shallowing copying values at each step as needed. Stop before the final // segment that will only be inserted. - var inner []instruction.Instruction + inner := make([]instruction.Instruction, 0, len(path)*21+1) ltemp := c.genLocal() - for i := 0; i < len(path)-1; i++ { + for i := range len(path) - 1 { // Lookup the next part of the path. inner = append(inner, instruction.GetLocal{Index: lcopy}) @@ -1408,10 +1414,10 @@ func (c *Compiler) compileUpsert(local ir.Local, path []int, value ir.Operand, _ inner = append(inner, instruction.Br{Index: uint32(len(path) - 1)}) // Generate blocks that handle missing nodes during traversal. 
- var block []instruction.Instruction + block := make([]instruction.Instruction, 0, len(path)*10) lval := c.genLocal() - for i := 0; i < len(path)-1; i++ { + for i := range len(path) - 1 { block = append(block, instruction.Block{Instrs: inner}) block = append(block, instruction.Call{Index: c.function(opaObject)}) block = append(block, instruction.SetLocal{Index: lval}) @@ -1535,8 +1541,7 @@ func (c *Compiler) compileExternalCall(stmt *ir.CallStmt, ef externalFunc, resul } instrs := *result - instrs = append(instrs, instruction.I32Const{Value: ef.ID}) - instrs = append(instrs, instruction.I32Const{Value: 0}) // unused context parameter + instrs = append(instrs, instruction.I32Const{Value: ef.ID}, instruction.I32Const{Value: 0}) // unused context parameter for _, arg := range stmt.Args { instrs = append(instrs, c.instrRead(arg)) @@ -1545,9 +1550,11 @@ func (c *Compiler) compileExternalCall(stmt *ir.CallStmt, ef externalFunc, resul instrs = append(instrs, instruction.Call{Index: c.function(builtinDispatchers[len(stmt.Args)])}) if ef.Decl.Result() != nil { - instrs = append(instrs, instruction.TeeLocal{Index: c.local(stmt.Result)}) - instrs = append(instrs, instruction.I32Eqz{}) - instrs = append(instrs, instruction.BrIf{Index: 0}) + instrs = append(instrs, + instruction.TeeLocal{Index: c.local(stmt.Result)}, + instruction.I32Eqz{}, + instruction.BrIf{Index: 0}, + ) } else { instrs = append(instrs, instruction.Drop{}) } @@ -1678,7 +1685,7 @@ func (c *Compiler) genLocal() uint32 { func (c *Compiler) function(name string) uint32 { fidx, ok := c.funcs[name] if !ok { - panic(fmt.Sprintf("function not found: %s", name)) + panic("function not found: " + name) } return fidx } diff --git a/vendor/github.com/open-policy-agent/opa/internal/config/config.go b/vendor/github.com/open-policy-agent/opa/internal/config/config.go index b1a9731f65d..fdac4877206 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/config/config.go +++ b/vendor/github.com/open-policy-agent/opa/internal/config/config.go @@ -15,11 +15,11 @@ import ( "sigs.k8s.io/yaml" "github.com/open-policy-agent/opa/internal/strvals" - "github.com/open-policy-agent/opa/keys" - "github.com/open-policy-agent/opa/logging" - "github.com/open-policy-agent/opa/plugins/rest" - "github.com/open-policy-agent/opa/tracing" - "github.com/open-policy-agent/opa/util" + "github.com/open-policy-agent/opa/v1/keys" + "github.com/open-policy-agent/opa/v1/logging" + "github.com/open-policy-agent/opa/v1/plugins/rest" + "github.com/open-policy-agent/opa/v1/tracing" + "github.com/open-policy-agent/opa/v1/util" ) // ServiceOptions stores the options passed to ParseServicesConfig diff --git a/vendor/github.com/open-policy-agent/opa/internal/edittree/bitvector/bitvector.go b/vendor/github.com/open-policy-agent/opa/internal/edittree/bitvector/bitvector.go index 89e7e137b74..8e4d65ed3ff 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/edittree/bitvector/bitvector.go +++ b/vendor/github.com/open-policy-agent/opa/internal/edittree/bitvector/bitvector.go @@ -36,7 +36,7 @@ func (vector *BitVector) Length() int { // position of the last byte in the slice. // This returns the bit that was shifted off of the last byte. 
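Two mechanical changes recur throughout the wasm.go hunks above: Go 1.22 integer ranges replace three-clause counting loops, and slices whose final size is predictable are allocated with capacity up front (the patch's own 21-instructions-per-path-segment estimate sizes one of them). A small sketch of both, with illustrative names:

    package main

    import "fmt"

    func squares(n int) []int {
        out := make([]int, 0, n) // capacity known in advance: no regrowth
        for i := range n {       // Go 1.22+: iterates 0..n-1
            out = append(out, i*i)
        }
        return out
    }

    func main() {
        fmt.Println(squares(4)) // [0 1 4 9]
    }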
func shiftLower(bit byte, b []byte) byte { - bit = bit << 7 + bit <<= 7 for i := len(b) - 1; i >= 0; i-- { newByte := b[i] >> 1 newByte |= bit diff --git a/vendor/github.com/open-policy-agent/opa/internal/edittree/edittree.go b/vendor/github.com/open-policy-agent/opa/internal/edittree/edittree.go index 9cfaee8bafc..378fe99a32d 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/edittree/edittree.go +++ b/vendor/github.com/open-policy-agent/opa/internal/edittree/edittree.go @@ -146,14 +146,14 @@ package edittree import ( - "encoding/json" + "errors" "fmt" "math/big" "sort" "strings" - "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/edittree/bitvector" + "github.com/open-policy-agent/opa/v1/ast" ) // Deletions are encoded with a nil value pointer. @@ -213,10 +213,10 @@ func (e *EditTree) getKeyHash(key *ast.Term) (int, bool) { case ast.Null, ast.Boolean, ast.String, ast.Var: equal = func(y ast.Value) bool { return x == y } case ast.Number: - if xi, err := json.Number(x).Int64(); err == nil { + if xi, ok := x.Int64(); ok { equal = func(y ast.Value) bool { if y, ok := y.(ast.Number); ok { - if yi, err := json.Number(y).Int64(); err == nil { + if yi, ok := y.Int64(); ok { return xi == yi } } @@ -336,13 +336,13 @@ func (e *EditTree) deleteChildValue(hash int) { // Insert creates a new child of e, and returns the new child EditTree node. func (e *EditTree) Insert(key, value *ast.Term) (*EditTree, error) { if e.value == nil { - return nil, fmt.Errorf("deleted node encountered during insert operation") + return nil, errors.New("deleted node encountered during insert operation") } if key == nil { - return nil, fmt.Errorf("nil key provided for insert operation") + return nil, errors.New("nil key provided for insert operation") } if value == nil { - return nil, fmt.Errorf("nil value provided for insert operation") + return nil, errors.New("nil value provided for insert operation") } switch x := e.value.Value.(type) { @@ -368,7 +368,7 @@ func (e *EditTree) Insert(key, value *ast.Term) (*EditTree, error) { return nil, err } if idx < 0 || idx > e.insertions.Length() { - return nil, fmt.Errorf("index for array insertion out of bounds") + return nil, errors.New("index for array insertion out of bounds") } return e.unsafeInsertArray(idx, value), nil default: @@ -458,10 +458,10 @@ func (e *EditTree) unsafeInsertArray(idx int, value *ast.Term) *EditTree { // already present in e. It then returns the deleted child EditTree node. func (e *EditTree) Delete(key *ast.Term) (*EditTree, error) { if e.value == nil { - return nil, fmt.Errorf("deleted node encountered during delete operation") + return nil, errors.New("deleted node encountered during delete operation") } if key == nil { - return nil, fmt.Errorf("nil key provided for delete operation") + return nil, errors.New("nil key provided for delete operation") } switch e.value.Value.(type) { @@ -532,7 +532,7 @@ func (e *EditTree) Delete(key *ast.Term) (*EditTree, error) { return nil, err } if idx < 0 || idx > e.insertions.Length()-1 { - return nil, fmt.Errorf("index for array delete out of bounds") + return nil, errors.New("index for array delete out of bounds") } // Collect insertion indexes above the delete site for rewriting. @@ -553,14 +553,14 @@ func (e *EditTree) Delete(key *ast.Term) (*EditTree, error) { } // Do rewrites to clear out the newly-removed element. 
e.deleteChildValue(idx) - for i := 0; i < len(rewritesScalars); i++ { + for i := range rewritesScalars { originalIdx := rewritesScalars[i] rewriteIdx := rewritesScalars[i] - 1 v := e.childScalarValues[originalIdx] e.deleteChildValue(originalIdx) e.setChildScalarValue(rewriteIdx, v) } - for i := 0; i < len(rewritesComposites); i++ { + for i := range rewritesComposites { originalIdx := rewritesComposites[i] rewriteIdx := rewritesComposites[i] - 1 v := e.childCompositeValues[originalIdx] @@ -592,7 +592,7 @@ func (e *EditTree) Delete(key *ast.Term) (*EditTree, error) { //gcassert:inline func sumZeroesBelowIndex(index int, bv *bitvector.BitVector) int { zeroesSeen := 0 - for i := 0; i < index; i++ { + for i := range index { if bv.Element(i) == 0 { zeroesSeen++ } @@ -602,7 +602,7 @@ func sumZeroesBelowIndex(index int, bv *bitvector.BitVector) int { func findIndexOfNthZero(n int, bv *bitvector.BitVector) (int, bool) { zeroesSeen := 0 - for i := 0; i < bv.Length(); i++ { + for i := range bv.Length() { if bv.Element(i) == 0 { zeroesSeen++ } @@ -638,7 +638,7 @@ func (e *EditTree) Unfold(path ast.Ref) (*EditTree, error) { } // 1+ path segment case. if e.value == nil { - return nil, fmt.Errorf("nil value encountered where composite value was expected") + return nil, errors.New("nil value encountered where composite value was expected") } // Switch behavior based on types. @@ -725,9 +725,9 @@ func (e *EditTree) Unfold(path ast.Ref) (*EditTree, error) { // Fall back to looking up the key in e.value. // Extend the tree if key is present. Error otherwise. - if v, err := x.Find(ast.Ref{ast.IntNumberTerm(idx)}); err == nil { + if v, err := x.Find(ast.Ref{ast.InternedIntNumberTerm(idx)}); err == nil { // TODO: Consider a more efficient "Replace" function that special-cases this for arrays instead? - _, err := e.Delete(ast.IntNumberTerm(idx)) + _, err := e.Delete(ast.InternedIntNumberTerm(idx)) if err != nil { return nil, err } @@ -832,7 +832,7 @@ func (e *EditTree) Render() *ast.Term { // original array. We build a new Array with modified/deleted keys. out := make([]*ast.Term, 0, e.insertions.Length()) eIdx := 0 - for i := 0; i < e.insertions.Length(); i++ { + for i := range e.insertions.Length() { // If the index == 0, that indicates we should look up the next // surviving original element. // If the index == 1, that indicates we should look up that @@ -880,7 +880,7 @@ func (e *EditTree) Render() *ast.Term { // Returns the inserted EditTree node. func (e *EditTree) InsertAtPath(path ast.Ref, value *ast.Term) (*EditTree, error) { if value == nil { - return nil, fmt.Errorf("cannot insert nil value into EditTree") + return nil, errors.New("cannot insert nil value into EditTree") } if len(path) == 0 { @@ -911,7 +911,7 @@ func (e *EditTree) DeleteAtPath(path ast.Ref) (*EditTree, error) { // Root document case: if len(path) == 0 { if e.value == nil { - return nil, fmt.Errorf("deleted node encountered during delete operation") + return nil, errors.New("deleted node encountered during delete operation") } e.value = nil e.childKeys = nil @@ -1026,8 +1026,7 @@ func (e *EditTree) Exists(path ast.Ref) bool { } // Fallback if child lookup failed. // We have to ensure that the lookup term is a number here, or Find will fail. - k := ast.Ref{ast.IntNumberTerm(idx)}.Concat(path[1:]) - _, err = x.Find(k) + _, err = x.Find(ast.Ref{ast.InternedIntNumberTerm(idx)}.Concat(path[1:])) return err == nil default: // Catch all primitive types. 
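The edittree hunks above switch array-index terms from ast.IntNumberTerm to ast.InternedIntNumberTerm. A rough sketch of the interning idea, under the assumption that it hands back pre-allocated terms for small integers so hot paths avoid repeated allocations (names and the cache size are illustrative, not OPA's implementation):

    package main

    import "fmt"

    type term struct{ n int }

    // Pre-allocate terms for a small integer range once.
    var small [256]*term

    func init() {
        for i := range small {
            small[i] = &term{n: i}
        }
    }

    // internedInt returns a shared term for small values and
    // falls back to a fresh allocation otherwise.
    func internedInt(i int) *term {
        if i >= 0 && i < len(small) {
            return small[i]
        }
        return &term{n: i}
    }

    func main() {
        fmt.Println(internedInt(7) == internedInt(7)) // true: same pointer
    }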
@@ -1048,7 +1047,7 @@ func toIndex(arrayLength int, term *ast.Term) (int, error) { switch v := term.Value.(type) { case ast.Number: if i, ok = v.Int(); !ok { - return 0, fmt.Errorf("invalid number type for indexing") + return 0, errors.New("invalid number type for indexing") } case ast.String: if v == "-" { @@ -1056,13 +1055,13 @@ func toIndex(arrayLength int, term *ast.Term) (int, error) { } num := ast.Number(v) if i, ok = num.Int(); !ok { - return 0, fmt.Errorf("invalid string for indexing") + return 0, errors.New("invalid string for indexing") } if v != "0" && strings.HasPrefix(string(v), "0") { - return 0, fmt.Errorf("leading zeros are not allowed in JSON paths") + return 0, errors.New("leading zeros are not allowed in JSON paths") } default: - return 0, fmt.Errorf("invalid type for indexing") + return 0, errors.New("invalid type for indexing") } return i, nil @@ -1181,5 +1180,5 @@ func (e *EditTree) Filter(paths []ast.Ref) *ast.Term { type termSlice []*ast.Term func (s termSlice) Less(i, j int) bool { return ast.Compare(s[i].Value, s[j].Value) < 0 } -func (s termSlice) Swap(i, j int) { x := s[i]; s[i] = s[j]; s[j] = x } +func (s termSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s termSlice) Len() int { return len(s) } diff --git a/vendor/github.com/open-policy-agent/opa/internal/future/filter_imports.go b/vendor/github.com/open-policy-agent/opa/internal/future/filter_imports.go index cf5721101a9..f8037ed63e9 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/future/filter_imports.go +++ b/vendor/github.com/open-policy-agent/opa/internal/future/filter_imports.go @@ -4,7 +4,7 @@ package future -import "github.com/open-policy-agent/opa/ast" +import "github.com/open-policy-agent/opa/v1/ast" // FilterFutureImports filters OUT any future imports from the passed slice of // `*ast.Import`s. 
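The termSlice.Swap change just above replaces a temporary-variable swap with Go's tuple assignment, the idiomatic form for sort.Interface implementations:

    package main

    import "fmt"

    func main() {
        s := []int{1, 2}
        s[0], s[1] = s[1], s[0] // one-line swap, no temp variable
        fmt.Println(s)          // [2 1]
    }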
@@ -19,12 +19,14 @@ func FilterFutureImports(imps []*ast.Import) []*ast.Import { return ret } +var keywordsTerm = ast.StringTerm("keywords") + // IsAllFutureKeywords returns true if the passed *ast.Import is `future.keywords` func IsAllFutureKeywords(imp *ast.Import) bool { path := imp.Path.Value.(ast.Ref) return len(path) == 2 && ast.FutureRootDocument.Equal(path[0]) && - path[1].Equal(ast.StringTerm("keywords")) + path[1].Equal(keywordsTerm) } // IsFutureKeyword returns true if the passed *ast.Import is `future.keywords.{kw}` @@ -32,7 +34,7 @@ func IsFutureKeyword(imp *ast.Import, kw string) bool { path := imp.Path.Value.(ast.Ref) return len(path) == 3 && ast.FutureRootDocument.Equal(path[0]) && - path[1].Equal(ast.StringTerm("keywords")) && + path[1].Equal(keywordsTerm) && path[2].Equal(ast.StringTerm(kw)) } @@ -40,7 +42,7 @@ func WhichFutureKeyword(imp *ast.Import) (string, bool) { path := imp.Path.Value.(ast.Ref) if len(path) == 3 && ast.FutureRootDocument.Equal(path[0]) && - path[1].Equal(ast.StringTerm("keywords")) { + path[1].Equal(keywordsTerm) { if str, ok := path[2].Value.(ast.String); ok { return string(str), true } diff --git a/vendor/github.com/open-policy-agent/opa/internal/future/parser_opts.go b/vendor/github.com/open-policy-agent/opa/internal/future/parser_opts.go index 804702b945a..eaeb87e2969 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/future/parser_opts.go +++ b/vendor/github.com/open-policy-agent/opa/internal/future/parser_opts.go @@ -5,9 +5,10 @@ package future import ( + "errors" "fmt" - "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/v1/ast" ) // ParserOptionsFromFutureImports transforms a slice of `ast.Import`s into the @@ -33,7 +34,7 @@ func ParserOptionsFromFutureImports(imports []*ast.Import) (ast.ParserOptions, e } if len(path) == 3 { if imp.Alias != "" { - return popts, fmt.Errorf("alias not supported") + return popts, errors.New("alias not supported") } popts.FutureKeywords = append(popts.FutureKeywords, string(path[2].Value.(ast.String))) } diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaReferencePool.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaReferencePool.go index 876419f56c4..515702095b2 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaReferencePool.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaReferencePool.go @@ -25,10 +25,6 @@ package gojsonschema -import ( - "fmt" -) - type schemaReferencePool struct { documents map[string]*SubSchema } @@ -44,7 +40,7 @@ func newSchemaReferencePool() *schemaReferencePool { func (p *schemaReferencePool) Get(ref string) (r *SubSchema, o bool) { if internalLogEnabled { - internalLog(fmt.Sprintf("Schema Reference ( %s )", ref)) + internalLog("Schema Reference ( %s )", ref) } if sch, ok := p.documents[ref]; ok { @@ -60,7 +56,7 @@ func (p *schemaReferencePool) Get(ref string) (r *SubSchema, o bool) { func (p *schemaReferencePool) Add(ref string, sch *SubSchema) { if internalLogEnabled { - internalLog(fmt.Sprintf("Add Schema Reference %s to pool", ref)) + internalLog("Add Schema Reference %s to pool", ref) } if _, ok := p.documents[ref]; !ok { p.documents[ref] = sch diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go index 7c86e37245e..efdea58b6bc 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go +++ 
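Hoisting `ast.StringTerm("keywords")` into the package-level `keywordsTerm` builds the term once instead of on every `IsAllFutureKeywords`/`IsFutureKeyword` call. The same pattern fits any immutable, reused value; this sketch uses `regexp.MustCompile` as a stand-in constructor, since it makes the cost of rebuilding per call obvious:

```go
package main

import (
	"fmt"
	"regexp"
)

// Built once at package init; every call below reuses the same value,
// mirroring the keywordsTerm hoisting in filter_imports.go.
var keywordsRe = regexp.MustCompile(`^future\.keywords(\.\w+)?$`)

func isFutureKeywordsPath(p string) bool {
	return keywordsRe.MatchString(p)
}

func main() {
	fmt.Println(isFutureKeywordsPath("future.keywords"))    // true
	fmt.Println(isFutureKeywordsPath("future.keywords.in")) // true
	fmt.Println(isFutureKeywordsPath("future.bar"))         // false
}
```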
b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go @@ -348,7 +348,7 @@ func (v *SubSchema) validateSchema(currentSubSchema *SubSchema, currentNode inte } } - if currentSubSchema.dependencies != nil && len(currentSubSchema.dependencies) > 0 { + if len(currentSubSchema.dependencies) > 0 { if currentNodeMap, ok := currentNode.(map[string]interface{}); ok { for elementKey := range currentNodeMap { if dependency, ok := currentSubSchema.dependencies[elementKey]; ok { @@ -469,7 +469,7 @@ func (v *SubSchema) validateArray(currentSubSchema *SubSchema, value []interface result.mergeErrors(validationResult) } } else { - if currentSubSchema.ItemsChildren != nil && len(currentSubSchema.ItemsChildren) > 0 { + if len(currentSubSchema.ItemsChildren) > 0 { nbItems := len(currentSubSchema.ItemsChildren) diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/definition.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/definition.go index d2039081689..ee3d4df3ac0 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/definition.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/definition.go @@ -29,7 +29,7 @@ type Definition struct { Types []string // union EnumValues EnumValueList // enum - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` BuiltIn bool `dump:"-"` } @@ -65,7 +65,7 @@ type FieldDefinition struct { DefaultValue *Value // only for input objects Type *Type Directives DirectiveList - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` } type ArgumentDefinition struct { @@ -74,14 +74,14 @@ type ArgumentDefinition struct { DefaultValue *Value Type *Type Directives DirectiveList - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` } type EnumValueDefinition struct { Description string Name string Directives DirectiveList - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` } type DirectiveDefinition struct { @@ -90,5 +90,5 @@ type DirectiveDefinition struct { Arguments ArgumentDefinitionList Locations []DirectiveLocation IsRepeatable bool - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` } diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/directive.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/directive.go index 5f6e8531f58..b11867c2e4b 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/directive.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/directive.go @@ -30,7 +30,7 @@ const ( type Directive struct { Name string Arguments ArgumentList - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` // Requires validation ParentDefinition *Definition diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/document.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/document.go index 43bfb54ff5c..4a6654b9aa3 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/document.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/document.go @@ -3,7 +3,7 @@ package ast type QueryDocument struct { Operations OperationList Fragments FragmentDefinitionList - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` } type SchemaDocument struct { @@ -12,7 +12,7 @@ type SchemaDocument struct { Directives DirectiveDefinitionList Definitions DefinitionList Extensions DefinitionList - Position *Position `dump:"-"` 
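The `validateSchema`/`validateArray` conditions drop the explicit nil guard because `len` is defined on nil maps and slices and returns 0, so `x != nil && len(x) > 0` and `len(x) > 0` are equivalent:

```go
package main

import "fmt"

func main() {
	var dependencies map[string][]string // nil map
	var itemsChildren []int              // nil slice

	fmt.Println(len(dependencies), len(itemsChildren)) // 0 0

	// The nil check is redundant: len(nil) is 0, so this branch is
	// taken exactly when the map is non-nil AND non-empty.
	if len(dependencies) > 0 {
		fmt.Println("has dependencies")
	} else {
		fmt.Println("no dependencies")
	}
}
```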
+ Position *Position `dump:"-" json:"-"` } func (d *SchemaDocument) Merge(other *SchemaDocument) { @@ -69,11 +69,11 @@ type SchemaDefinition struct { Description string Directives DirectiveList OperationTypes OperationTypeDefinitionList - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` } type OperationTypeDefinition struct { Operation Operation Type string - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` } diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/dumper.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/dumper.go index dbb7a7efaf3..84266a618fc 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/dumper.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/dumper.go @@ -40,10 +40,10 @@ func (d *dumper) dump(v reflect.Value) { d.WriteString("false") } case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - d.WriteString(fmt.Sprintf("%d", v.Int())) + d.WriteString(strconv.FormatInt(v.Int(), 10)) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - d.WriteString(fmt.Sprintf("%d", v.Uint())) + d.WriteString(strconv.FormatUint(v.Uint(), 10)) case reflect.Float32, reflect.Float64: d.WriteString(fmt.Sprintf("%.2f", v.Float())) @@ -88,7 +88,7 @@ func typeName(t reflect.Type) string { func (d *dumper) dumpArray(v reflect.Value) { d.WriteString("[" + typeName(v.Type().Elem()) + "]") - for i := 0; i < v.Len(); i++ { + for i := range v.Len() { d.nl() d.WriteString("- ") d.indent++ @@ -102,7 +102,7 @@ func (d *dumper) dumpStruct(v reflect.Value) { d.indent++ typ := v.Type() - for i := 0; i < v.NumField(); i++ { + for i := range v.NumField() { f := v.Field(i) if typ.Field(i).Tag.Get("dump") == "-" { continue @@ -132,13 +132,13 @@ func isZero(v reflect.Value) bool { return true } z := true - for i := 0; i < v.Len(); i++ { + for i := range v.Len() { z = z && isZero(v.Index(i)) } return z case reflect.Struct: z := true - for i := 0; i < v.NumField(); i++ { + for i := range v.NumField() { z = z && isZero(v.Field(i)) } return z diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/fragment.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/fragment.go index 57ab56c7c68..723d8339990 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/fragment.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/fragment.go @@ -8,7 +8,7 @@ type FragmentSpread struct { ObjectDefinition *Definition Definition *FragmentDefinition - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` } type InlineFragment struct { @@ -19,7 +19,7 @@ type InlineFragment struct { // Require validation ObjectDefinition *Definition - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` } type FragmentDefinition struct { @@ -34,5 +34,5 @@ type FragmentDefinition struct { // Require validation Definition *Definition - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` } diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/operation.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/operation.go index 3b37f81bf34..5fc2f3b2461 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/operation.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/operation.go @@ -14,7 +14,7 @@ type OperationDefinition struct { VariableDefinitions VariableDefinitionList 
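In `dumper.dump`, `fmt.Sprintf("%d", …)` becomes `strconv.FormatInt`/`FormatUint`. Both render the same digits, but `strconv` formats the integer directly while `fmt` routes through reflection and an interface allocation:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	var i int64 = -42
	var u uint64 = 42

	a := strconv.FormatInt(i, 10)  // "-42"
	b := strconv.FormatUint(u, 10) // "42"
	c := fmt.Sprintf("%d", i)      // "-42": same output, more overhead

	fmt.Println(a, b, c, a == c)
}
```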
Directives DirectiveList SelectionSet SelectionSet - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` } type VariableDefinition struct { @@ -22,7 +22,7 @@ type VariableDefinition struct { Type *Type DefaultValue *Value Directives DirectiveList - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` // Requires validation Definition *Definition diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/selection.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/selection.go index 5ef26c6ab38..677300edd53 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/selection.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/selection.go @@ -21,7 +21,7 @@ type Field struct { Arguments ArgumentList Directives DirectiveList SelectionSet SelectionSet - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` // Require validation Definition *FieldDefinition @@ -31,7 +31,7 @@ type Field struct { type Argument struct { Name string Value *Value - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` } func (s *Field) ArgumentMap(vars map[string]interface{}) map[string]interface{} { diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/type.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/type.go index 5f77bc7ce47..669f1da57ee 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/type.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/type.go @@ -20,7 +20,7 @@ type Type struct { NamedType string Elem *Type NonNull bool - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` } func (t *Type) Name() string { diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/value.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/value.go index c25ef150593..ae23a98d7d1 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/value.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/value.go @@ -25,7 +25,7 @@ type Value struct { Raw string Children ChildValueList Kind ValueKind - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` // Require validation Definition *Definition @@ -36,7 +36,7 @@ type Value struct { type ChildValue struct { Name string Value *Value - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` } func (v *Value) Value(vars map[string]interface{}) (interface{}, error) { diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fields_on_correct_type.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fields_on_correct_type.go index d536e5e5f42..f6817674759 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fields_on_correct_type.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fields_on_correct_type.go @@ -27,7 +27,7 @@ func init() { } addError( - Message(message), + Message(message), //nolint:govet At(field.Position), ) }) diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fragments_on_composite_types.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fragments_on_composite_types.go index 66bd348c478..861e3b16cf5 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fragments_on_composite_types.go +++ 
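The `json:"-"` tags added next to the existing `dump:"-"` tags exclude the `Position` fields from `encoding/json` output; each tool reads its own key out of the one tag string. A self-contained sketch with a simplified `Directive`:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type Position struct{ Line, Column int }

type Directive struct {
	Name string
	// `dump` is read by the internal dumper; `json:"-"` tells
	// encoding/json to skip the field entirely.
	Position *Position `dump:"-" json:"-"`
}

func main() {
	d := Directive{Name: "skip", Position: &Position{Line: 3, Column: 7}}
	out, _ := json.Marshal(d)
	fmt.Println(string(out)) // {"Name":"skip"} — Position is omitted
}
```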
b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fragments_on_composite_types.go @@ -20,7 +20,7 @@ func init() { message := fmt.Sprintf(`Fragment cannot condition on non composite type "%s".`, inlineFragment.TypeCondition) addError( - Message(message), + Message(message), //nolint:govet At(inlineFragment.Position), ) }) @@ -33,7 +33,7 @@ func init() { message := fmt.Sprintf(`Fragment "%s" cannot condition on non composite type "%s".`, fragment.Name, fragment.TypeCondition) addError( - Message(message), + Message(message), //nolint:govet At(fragment.Position), ) }) diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_fragment_cycles.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_fragment_cycles.go index a7de611f176..edc562ddd42 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_fragment_cycles.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_fragment_cycles.go @@ -51,7 +51,7 @@ func init() { } var via string if len(fragmentNames) != 0 { - via = fmt.Sprintf(" via %s", strings.Join(fragmentNames, ", ")) + via = " via " + strings.Join(fragmentNames, ", ") } addError( Message(`Cannot spread fragment "%s" within itself%s.`, spreadName, via), diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/values_of_correct_type.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/values_of_correct_type.go index 8858023d4e9..afd9f54f106 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/values_of_correct_type.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/values_of_correct_type.go @@ -159,8 +159,6 @@ func unexpectedTypeMessageOnly(v *ast.Value) ErrorOption { return Message(`Float cannot represent non numeric value: %s`, v.String()) case "ID", "ID!": return Message(`ID cannot represent a non-string and non-integer value: %s`, v.String()) - //case "Enum": - // return Message(`Enum "%s" cannot represent non-enum value: %s`, v.ExpectedType.String(), v.String()) default: if v.Definition.Kind == ast.Enum { return Message(`Enum "%s" cannot represent non-enum value: %s.`, v.ExpectedType.String(), v.String()) diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/vars.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/vars.go index 86be7c4df20..66924148ba1 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/vars.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/vars.go @@ -2,6 +2,7 @@ package validator import ( "encoding/json" + "errors" "fmt" "reflect" "strconv" @@ -11,7 +12,7 @@ import ( "github.com/open-policy-agent/opa/internal/gqlparser/gqlerror" ) -var ErrUnexpectedType = fmt.Errorf("Unexpected Type") +var ErrUnexpectedType = errors.New("Unexpected Type") // VariableValues coerces and validates variable values func VariableValues(schema *ast.Schema, op *ast.OperationDefinition, variables map[string]interface{}) (map[string]interface{}, error) { @@ -106,7 +107,7 @@ func (v *varValidator) validateVarType(typ *ast.Type, val reflect.Value) (reflec slc = reflect.Append(slc, val) val = slc } - for i := 0; i < val.Len(); i++ { + for i := range val.Len() { resetPath() v.path = append(v.path, ast.PathIndex(i)) field := val.Index(i) @@ -222,7 +223,7 @@ func (v *varValidator) validateVarType(typ *ast.Type, 
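In `no_fragment_cycles.go`, `fmt.Sprintf(" via %s", …)` becomes plain concatenation: when a format string is just a literal glued to one `%s`, `+` says the same thing without printf parsing:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	fragmentNames := []string{"A", "B", "C"}
	var via string
	if len(fragmentNames) != 0 {
		// Equivalent to fmt.Sprintf(" via %s", strings.Join(...)),
		// minus the format-string machinery.
		via = " via " + strings.Join(fragmentNames, ", ")
	}
	fmt.Printf("Cannot spread fragment %q within itself%s.\n", "F", via)
}
```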
val reflect.Value) (reflec if fieldDef.Type.NonNull && field.IsNil() { return val, gqlerror.ErrorPathf(v.path, "cannot be null") } - //allow null object field and skip it + // allow null object field and skip it if !fieldDef.Type.NonNull && field.IsNil() { continue } diff --git a/vendor/github.com/open-policy-agent/opa/internal/json/patch/patch.go b/vendor/github.com/open-policy-agent/opa/internal/json/patch/patch.go index 31c89869da2..9ddb93506ef 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/json/patch/patch.go +++ b/vendor/github.com/open-policy-agent/opa/internal/json/patch/patch.go @@ -7,7 +7,7 @@ package patch import ( "strings" - "github.com/open-policy-agent/opa/storage" + "github.com/open-policy-agent/opa/v1/storage" ) // ParsePatchPathEscaped returns a new path for the given escaped str. @@ -37,8 +37,8 @@ func ParsePatchPathEscaped(str string) (path storage.Path, ok bool) { // the substitutions in this order, an implementation avoids the error of // turning '~01' first into '~1' and then into '/', which would be // incorrect (the string '~01' correctly becomes '~1' after transformation)." - path[i] = strings.Replace(path[i], "~1", "/", -1) - path[i] = strings.Replace(path[i], "~0", "~", -1) + path[i] = strings.ReplaceAll(path[i], "~1", "/") + path[i] = strings.ReplaceAll(path[i], "~0", "~") } return diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/jwk.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/jwk.go index aa22a3830f2..7de27d4e4e2 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/jwk.go +++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/jwk.go @@ -114,7 +114,7 @@ func parse(jwkSrc string) (*Set, error) { // ParseBytes parses JWK from the incoming byte buffer. func ParseBytes(buf []byte) (*Set, error) { - return parse(string(buf[:])) + return parse(string(buf)) } // ParseString parses JWK from the incoming string. 
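`ParsePatchPathEscaped` keeps the RFC 6901 substitution order while switching to `strings.ReplaceAll` (equivalent to `strings.Replace` with n = -1). The order is load-bearing, as the retained comment explains: `~1` must be unescaped before `~0`, or `~01` would wrongly become `/`:

```go
package main

import (
	"fmt"
	"strings"
)

// unescapeToken applies the RFC 6901 JSON Pointer substitutions in the
// required order: "~1" -> "/" first, then "~0" -> "~".
func unescapeToken(s string) string {
	s = strings.ReplaceAll(s, "~1", "/")
	return strings.ReplaceAll(s, "~0", "~")
}

func main() {
	fmt.Println(unescapeToken("a~1b")) // a/b
	fmt.Println(unescapeToken("m~0n")) // m~n
	fmt.Println(unescapeToken("~01"))  // ~1 — reversed order would yield "/"
}
```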
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/key_ops.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/key_ops.go index e8fe4cd8541..c02b0b9990f 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/key_ops.go +++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/key_ops.go @@ -2,6 +2,7 @@ package jwk import ( "encoding/json" + "errors" "fmt" ) @@ -53,12 +54,12 @@ func (keyOperationList *KeyOperationList) UnmarshalJSON(data []byte) error { var tempKeyOperationList []string err := json.Unmarshal(data, &tempKeyOperationList) if err != nil { - return fmt.Errorf("invalid key operation") + return errors.New("invalid key operation") } for _, value := range tempKeyOperationList { _, ok := keyOps[value] if !ok { - return fmt.Errorf("unknown key operation") + return errors.New("unknown key operation") } *keyOperationList = append(*keyOperationList, KeyOperation(value)) } diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/jws.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/jws.go index 2a5fe3c1738..20fb957d3e5 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/jws.go +++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/jws.go @@ -111,7 +111,7 @@ func Verify(buf []byte, alg jwa.SignatureAlgorithm, key interface{}) (ret []byte return nil, errors.New(`attempt to verify empty buffer`) } - parts, err := SplitCompact(string(buf[:])) + parts, err := SplitCompact(string(buf)) if err != nil { return nil, fmt.Errorf("failed extract from compact serialization format: %w", err) } @@ -164,7 +164,7 @@ func VerifyWithJWKSet(buf []byte, keyset *jwk.Set) (payload []byte, err error) { // ParseByte parses a JWS value serialized via compact serialization and provided as []byte. func ParseByte(jwsCompact []byte) (m *Message, err error) { - return parseCompact(string(jwsCompact[:])) + return parseCompact(string(jwsCompact)) } // ParseString parses a JWS value serialized via compact serialization and provided as string. 
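`string(buf[:])` in `ParseBytes`, `Verify`, and `ParseByte` becomes `string(buf)`: slicing a slice over its full range is a no-op, so the shorter conversion is identical:

```go
package main

import "fmt"

func main() {
	buf := []byte("jws payload")
	// buf[:] re-slices the entire slice — same pointer, length, and
	// capacity — so both conversions produce the same string.
	a := string(buf[:])
	b := string(buf)
	fmt.Println(a == b) // true
}
```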
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/sign.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/sign.go index 7db7bbd69cb..fd123eb7597 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/sign.go +++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/sign.go @@ -3,6 +3,7 @@ package sign import ( "crypto/x509" "encoding/pem" + "errors" "fmt" "github.com/open-policy-agent/opa/internal/jwx/jwa" @@ -30,7 +31,7 @@ func GetSigningKey(key string, alg jwa.SignatureAlgorithm) (interface{}, error) case jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512: block, _ := pem.Decode([]byte(key)) if block == nil { - return nil, fmt.Errorf("failed to parse PEM block containing the key") + return nil, errors.New("failed to parse PEM block containing the key") } priv, err := x509.ParsePKCS1PrivateKey(block.Bytes) @@ -45,7 +46,7 @@ func GetSigningKey(key string, alg jwa.SignatureAlgorithm) (interface{}, error) case jwa.ES256, jwa.ES384, jwa.ES512: block, _ := pem.Decode([]byte(key)) if block == nil { - return nil, fmt.Errorf("failed to parse PEM block containing the key") + return nil, errors.New("failed to parse PEM block containing the key") } priv, err := x509.ParseECPrivateKey(block.Bytes) diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/verify.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/verify.go index 05720a64e05..04ee9141e93 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/verify.go +++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/verify.go @@ -5,6 +5,7 @@ import ( "crypto/rsa" "crypto/x509" "encoding/pem" + "errors" "fmt" "github.com/open-policy-agent/opa/internal/jwx/jwa" @@ -33,7 +34,7 @@ func GetSigningKey(key string, alg jwa.SignatureAlgorithm) (interface{}, error) case jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512, jwa.ES256, jwa.ES384, jwa.ES512: block, _ := pem.Decode([]byte(key)) if block == nil { - return nil, fmt.Errorf("failed to parse PEM block containing the key") + return nil, errors.New("failed to parse PEM block containing the key") } pub, err := x509.ParsePKIXPublicKey(block.Bytes) diff --git a/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go b/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go index b75d26ddab3..661703e08c2 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go +++ b/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go @@ -11,10 +11,10 @@ import ( "io" "sort" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/ast/location" "github.com/open-policy-agent/opa/internal/debug" - "github.com/open-policy-agent/opa/ir" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/ast/location" + "github.com/open-policy-agent/opa/v1/ir" ) // QuerySet represents the input to the planner. @@ -223,7 +223,7 @@ func (p *Planner) planRules(rules []*ast.Rule) (string, error) { } // Initialize parameters for functions. 
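The `GetSigningKey` hunks preserve a pattern worth noting alongside the `errors.New` swap: `pem.Decode` signals failure by returning a nil block (plus the unconsumed remainder), not an error, so the nil check is the error path:

```go
package main

import (
	"crypto/x509"
	"encoding/pem"
	"errors"
	"fmt"
)

func parseRSAPrivateKey(key string) (any, error) {
	// pem.Decode returns (nil, input) when no PEM block is found;
	// there is no error value to inspect.
	block, _ := pem.Decode([]byte(key))
	if block == nil {
		return nil, errors.New("failed to parse PEM block containing the key")
	}
	return x509.ParsePKCS1PrivateKey(block.Bytes)
}

func main() {
	_, err := parseRSAPrivateKey("not a pem")
	fmt.Println(err)
}
```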
- for i := 0; i < len(rules[0].Head.Args); i++ { + for range len(rules[0].Head.Args) { fn.Params = append(fn.Params, p.newLocal()) } @@ -385,7 +385,7 @@ func (p *Planner) planRules(rules []*ast.Rule) (string, error) { return nil }) default: - return fmt.Errorf("illegal rule kind") + return errors.New("illegal rule kind") } }) }) @@ -497,7 +497,6 @@ func (p *Planner) planDotOr(obj ir.Local, key ir.Operand, or stmtFactory, iter p func (p *Planner) planNestedObjects(obj ir.Local, ref ast.Ref, iter planLocalIter) error { if len(ref) == 0 { - //return fmt.Errorf("nested object construction didn't create object") return iter(obj) } @@ -991,8 +990,7 @@ func (p *Planner) planExprCall(e *ast.Expr, iter planiter) error { op := e.Operator() if replacement := p.mocks.Lookup(operator); replacement != nil { - switch r := replacement.Value.(type) { - case ast.Ref: + if r, ok := replacement.Value.(ast.Ref); ok { if !r.HasPrefix(ast.DefaultRootRef) && !r.HasPrefix(ast.InputRootRef) { // replacement is builtin operator = r.String() @@ -1037,7 +1035,7 @@ func (p *Planner) planExprCall(e *ast.Expr, iter planiter) error { args = p.defaultOperands() } else if decl, ok := p.decls[operator]; ok { relation = decl.Relation - arity = len(decl.Decl.Args()) + arity = decl.Decl.Arity() void = decl.Decl.Result() == nil name = operator p.externs[operator] = decl @@ -1147,7 +1145,7 @@ func (p *Planner) planExprCallFunc(name string, arity int, void bool, operands [ }) default: - return fmt.Errorf("impossible replacement, arity mismatch") + return errors.New("impossible replacement, arity mismatch") } } @@ -1173,7 +1171,7 @@ func (p *Planner) planExprCallValue(value *ast.Term, arity int, operands []*ast. }) }) default: - return fmt.Errorf("impossible replacement, arity mismatch") + return errors.New("impossible replacement, arity mismatch") } } @@ -1519,7 +1517,7 @@ func (p *Planner) planValue(t ast.Value, loc *ast.Location, iter planiter) error p.loc = loc return p.planObjectComprehension(v, iter) default: - return fmt.Errorf("%v term not implemented", ast.TypeName(v)) + return fmt.Errorf("%v term not implemented", ast.ValueName(v)) } } @@ -1750,7 +1748,7 @@ func (p *Planner) planRef(ref ast.Ref, iter planiter) error { head, ok := ref[0].Value.(ast.Var) if !ok { - return fmt.Errorf("illegal ref: non-var head") + return errors.New("illegal ref: non-var head") } if head.Compare(ast.DefaultRootDocument.Value) == 0 { @@ -1767,7 +1765,7 @@ func (p *Planner) planRef(ref ast.Ref, iter planiter) error { p.ltarget, ok = p.vars.GetOp(head) if !ok { - return fmt.Errorf("illegal ref: unsafe head") + return errors.New("illegal ref: unsafe head") } return p.planRefRec(ref, 1, iter) @@ -2365,6 +2363,10 @@ func rewrittenVar(vars map[ast.Var]ast.Var, k ast.Var) ast.Var { return rw } +func dont() ([][]*ast.Rule, []ir.Operand, int, bool) { + return nil, nil, 0, false +} + // optimizeLookup returns a set of rulesets and required statements planning // the locals (strings) needed with the used local variables, and the index // into ref's parth that is still to be planned; if the passed ref's vars @@ -2381,9 +2383,6 @@ func rewrittenVar(vars map[ast.Var]ast.Var, k ast.Var) ast.Var { // var actually matched_ -- so we don't know which subtree to evaluate // with the results. 
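The `dont` helper moves from a closure inside `optimizeLookup` to a package-level function: it captures nothing, so hoisting it avoids rebuilding the closure on every call. A sketch with a simplified signature (the real one returns `[][]*ast.Rule, []ir.Operand, int, bool`):

```go
package main

import "fmt"

// dont captures no state, so it can live at package level instead of
// being allocated as a closure each time the caller runs.
func dont() ([]string, int, bool) {
	return nil, 0, false
}

func optimizeLookup(ref []string) ([]string, int, bool) {
	if len(ref) == 0 {
		return dont() // shared "no optimization" result
	}
	return ref, len(ref), true
}

func main() {
	fmt.Println(optimizeLookup(nil))
	fmt.Println(optimizeLookup([]string{"data", "x"}))
}
```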
func (p *Planner) optimizeLookup(t *ruletrie, ref ast.Ref) ([][]*ast.Rule, []ir.Operand, int, bool) { - dont := func() ([][]*ast.Rule, []ir.Operand, int, bool) { - return nil, nil, 0, false - } if t == nil { p.debugf("no optimization of %s: trie is nil", ref) return dont() @@ -2411,6 +2410,10 @@ outer: opt = true // take all children, they might match for _, node := range nodes { + if nr := node.Rules(); len(nr) > 0 { + p.debugf("no optimization of %s: node with rules (%v)", ref, refsOfRules(nr)) + return dont() + } for _, child := range node.Children() { if node := node.Get(child); node != nil { nextNodes = append(nextNodes, node) @@ -2418,8 +2421,12 @@ outer: } } case ast.String: - // take all children that either match or have a var key + // take all children that either match or have a var key // TODO(sr): Where's the code for the second part, having a var key? for _, node := range nodes { + if nr := node.Rules(); len(nr) > 0 { + p.debugf("no optimization of %s: node with rules (%v)", ref, refsOfRules(nr)) + return dont() + } if node := node.Get(r); node != nil { nextNodes = append(nextNodes, node) } @@ -2438,10 +2445,20 @@ outer: // let us break, too. all := 0 for _, node := range nodes { - all += node.ChildrenCount() + if i < len(ref)-1 { + // Look ahead one term to only count those children relevant to your planned ref. + switch ref[i+1].Value.(type) { + case ast.Var: + all += node.ChildrenCount() + default: + if relChildren := node.Get(ref[i+1].Value); relChildren != nil { + all++ + } + } + } } if all == 0 { - p.debugf("ref %s: all nodes have 0 children, break", ref[0:index+1]) + p.debugf("ref %s: all nodes have 0 relevant children, break", ref[0:index+1]) break } @@ -2550,3 +2567,11 @@ func (p *Planner) isFunction(r ast.Ref) bool { func op(v ir.Val) ir.Operand { return ir.Operand{Value: v} } + +func refsOfRules(rs []*ast.Rule) []string { + refs := make([]string, len(rs)) + for i := range rs { + refs[i] = rs[i].Head.Ref().String() + } + return refs +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/planner/rules.go b/vendor/github.com/open-policy-agent/opa/internal/planner/rules.go index f5d6f3fc6c5..cc7f12bd2b5 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/planner/rules.go +++ b/vendor/github.com/open-policy-agent/opa/internal/planner/rules.go @@ -4,7 +4,7 @@ import ( "fmt" "sort" - "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/v1/ast" ) // funcstack implements a simple map structure used to keep track of virtual @@ -111,7 +111,7 @@ func (t *ruletrie) Rules() []*ast.Rule { func (t *ruletrie) Push(key ast.Ref) { node := t - for i := 0; i < len(key)-1; i++ { + for i := range len(key) - 1 { node = node.Get(key[i].Value) if node == nil { return @@ -123,7 +123,7 @@ func (t *ruletrie) Push(key ast.Ref) { func (t *ruletrie) Pop(key ast.Ref) { node := t - for i := 0; i < len(key)-1; i++ { + for i := range len(key) - 1 { node = node.Get(key[i].Value) if node == nil { return diff --git a/vendor/github.com/open-policy-agent/opa/internal/planner/varstack.go b/vendor/github.com/open-policy-agent/opa/internal/planner/varstack.go index dccff1b5c1d..0df6bcd8b2c 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/planner/varstack.go +++ b/vendor/github.com/open-policy-agent/opa/internal/planner/varstack.go @@ -5,8 +5,8 @@ package planner import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/ir" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/ir" ) type varstack 
[]map[ast.Var]ir.Local diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/compare.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/compare.go index 1d0f25f8c20..103dc776673 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/compare.go +++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/compare.go @@ -1,6 +1,6 @@ package crypto -import "fmt" +import "errors" // ConstantTimeByteCompare is a constant-time byte comparison of x and y. This function performs an absolute comparison // if the two byte slices assuming they represent a big-endian number. @@ -11,7 +11,7 @@ import "fmt" // +1 if x > y func ConstantTimeByteCompare(x, y []byte) (int, error) { if len(x) != len(y) { - return 0, fmt.Errorf("slice lengths do not match") + return 0, errors.New("slice lengths do not match") } xLarger, yLarger := 0, 0 diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/ecc.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/ecc.go index 758c73fcb3e..12679a15be3 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/ecc.go +++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/ecc.go @@ -7,6 +7,7 @@ import ( "crypto/hmac" "encoding/asn1" "encoding/binary" + "errors" "fmt" "hash" "math" @@ -82,7 +83,7 @@ func HMACKeyDerivation(hash func() hash.Hash, bitLen int, key []byte, label, con // verify the requested bit length is not larger then the length encoding size if int64(bitLen) > 0x7FFFFFFF { - return nil, fmt.Errorf("bitLen is greater than 32-bits") + return nil, errors.New("bitLen is greater than 32-bits") } fixedInput := bytes.NewBuffer(nil) diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/ecr.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/ecr.go index 179b5b5d5e7..55e587e9f53 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/ecr.go +++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/ecr.go @@ -11,7 +11,7 @@ import ( "time" "github.com/open-policy-agent/opa/internal/version" - "github.com/open-policy-agent/opa/logging" + "github.com/open-policy-agent/opa/v1/logging" ) // Values taken from diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/kms.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/kms.go index 77c0bc93491..6dfb06a4961 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/kms.go +++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/kms.go @@ -10,7 +10,7 @@ import ( "time" "github.com/open-policy-agent/opa/internal/version" - "github.com/open-policy-agent/opa/logging" + "github.com/open-policy-agent/opa/v1/logging" ) // Values taken from diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go index bfb780754be..07aa568fa2f 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go +++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go @@ -8,18 +8,19 @@ import ( "bytes" "crypto/hmac" "crypto/sha256" + "encoding/hex" "errors" "fmt" "io" "net/http" "net/url" - "sort" "strings" "time" v4 "github.com/open-policy-agent/opa/internal/providers/aws/v4" - "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/v1/ast" + 
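`ConstantTimeByteCompare` refuses mismatched lengths up front. The standard library's `crypto/subtle.ConstantTimeCompare` is the closest analogue; it returns 0 immediately on a length mismatch, so an explicit length check like OPA's keeps that case a distinct error. A sketch of the stdlib pattern, not OPA's implementation:

```go
package main

import (
	"crypto/subtle"
	"errors"
	"fmt"
)

// equalMACs compares two MACs without leaking where they differ.
func equalMACs(x, y []byte) (bool, error) {
	if len(x) != len(y) {
		return false, errors.New("slice lengths do not match")
	}
	return subtle.ConstantTimeCompare(x, y) == 1, nil
}

func main() {
	ok, err := equalMACs([]byte("abc"), []byte("abd"))
	fmt.Println(ok, err) // false <nil>
}
```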
"github.com/open-policy-agent/opa/v1/util" ) func stringFromTerm(t *ast.Term) string { @@ -67,19 +68,6 @@ func sha256MAC(message string, key []byte) []byte { return mac.Sum(nil) } -func sortKeys(strMap map[string][]string) []string { - keys := make([]string, len(strMap)) - - i := 0 - for k := range strMap { - keys[i] = k - i++ - } - sort.Strings(keys) - - return keys -} - // SignRequest modifies an http.Request to include an AWS V4 signature based on the provided credentials. func SignRequest(req *http.Request, service string, creds Credentials, theTime time.Time, sigVersion string) error { // General ref. https://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html @@ -168,7 +156,7 @@ func SignV4(headers map[string][]string, method string, theURL *url.URL, body [] canonicalReq += theURL.RawQuery + "\n" // RAW Query String // include the values for the signed headers - orderedKeys := sortKeys(headersToSign) + orderedKeys := util.KeysSorted(headersToSign) for _, k := range orderedKeys { canonicalReq += k + ":" + strings.Join(headersToSign[k], ",") + "\n" } @@ -202,7 +190,7 @@ func SignV4(headers map[string][]string, method string, theURL *url.URL, body [] authHeader := "AWS4-HMAC-SHA256 Credential=" + awsCreds.AccessKey + "/" + dateNow authHeader += "/" + awsCreds.RegionName + "/" + service + "/aws4_request," authHeader += "SignedHeaders=" + headerList + "," - authHeader += "Signature=" + fmt.Sprintf("%x", signature) + authHeader += "Signature=" + hex.EncodeToString(signature) return authHeader, awsHeaders } diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4a.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4a.go index 929f2006e74..8f6d760e82a 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4a.go +++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4a.go @@ -9,7 +9,7 @@ import ( "crypto/rand" "crypto/sha256" "encoding/hex" - "fmt" + "errors" "hash" "io" "math/big" @@ -107,7 +107,7 @@ func deriveKeyFromAccessKeyPair(accessKey, secretKey string) (*ecdsa.PrivateKey, counter++ if counter > 0xFF { - return nil, fmt.Errorf("exhausted single byte external counter") + return nil, errors.New("exhausted single byte external counter") } } d = d.Add(d, one) @@ -146,7 +146,7 @@ func retrievePrivateKey(symmetric Credentials) (v4aCredentials, error) { privateKey, err := deriveKeyFromAccessKeyPair(symmetric.AccessKey, symmetric.SecretKey) if err != nil { - return v4aCredentials{}, fmt.Errorf("failed to derive asymmetric key from credentials") + return v4aCredentials{}, errors.New("failed to derive asymmetric key from credentials") } creds := v4aCredentials{ @@ -216,7 +216,7 @@ func (s *httpSigner) Build() (signedRequest, error) { signedHeaders, signedHeadersStr, canonicalHeaderStr := s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength) - rawQuery := strings.Replace(query.Encode(), "+", "%20", -1) + rawQuery := strings.ReplaceAll(query.Encode(), "+", "%20") canonicalURI := v4Internal.GetURIPath(req.URL) @@ -280,7 +280,7 @@ func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature return parts.String() } -func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) { +func (*httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, 
signedHeaders, canonicalHeadersStr string) { signed = make(http.Header) const hostHeader = "host" @@ -314,7 +314,7 @@ func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, he var canonicalHeaders strings.Builder n := len(headers) const colon = ':' - for i := 0; i < n; i++ { + for i := range n { if headers[i] == hostHeader { canonicalHeaders.WriteString(hostHeader) canonicalHeaders.WriteRune(colon) diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go index e033da74606..9ce9af90dac 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go +++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go @@ -5,7 +5,7 @@ import ( "io" "net/http" - "github.com/open-policy-agent/opa/logging" + "github.com/open-policy-agent/opa/v1/logging" ) // DoRequestWithClient is a convenience function to get the body of an HTTP response with diff --git a/vendor/github.com/open-policy-agent/opa/internal/ref/ref.go b/vendor/github.com/open-policy-agent/opa/internal/ref/ref.go index 6e84df4b08a..173b5a3c1be 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/ref/ref.go +++ b/vendor/github.com/open-policy-agent/opa/internal/ref/ref.go @@ -9,8 +9,8 @@ import ( "errors" "strings" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/storage" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/storage" ) // ParseDataPath returns a ref from the slash separated path s rooted at data. diff --git a/vendor/github.com/open-policy-agent/opa/internal/rego/opa/options.go b/vendor/github.com/open-policy-agent/opa/internal/rego/opa/options.go index b58a05ee8e9..072e37667ab 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/rego/opa/options.go +++ b/vendor/github.com/open-policy-agent/opa/internal/rego/opa/options.go @@ -4,11 +4,11 @@ import ( "io" "time" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/topdown/builtins" - "github.com/open-policy-agent/opa/topdown/cache" - "github.com/open-policy-agent/opa/topdown/print" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/cache" + "github.com/open-policy-agent/opa/v1/topdown/print" ) // Result holds the evaluation result. diff --git a/vendor/github.com/open-policy-agent/opa/internal/report/report.go b/vendor/github.com/open-policy-agent/opa/internal/report/report.go index 145d0a94651..55f4cfe2103 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/report/report.go +++ b/vendor/github.com/open-policy-agent/opa/internal/report/report.go @@ -17,12 +17,12 @@ import ( "sync" "time" - "github.com/open-policy-agent/opa/keys" - "github.com/open-policy-agent/opa/logging" + "github.com/open-policy-agent/opa/v1/keys" + "github.com/open-policy-agent/opa/v1/logging" + "github.com/open-policy-agent/opa/v1/version" - "github.com/open-policy-agent/opa/plugins/rest" - "github.com/open-policy-agent/opa/util" - "github.com/open-policy-agent/opa/version" + "github.com/open-policy-agent/opa/v1/plugins/rest" + "github.com/open-policy-agent/opa/v1/util" ) // ExternalServiceURL is the base HTTP URL for a telemetry service. 
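The import rewrites running through these files are the heart of the 0.70.0 → 1.4.2 bump: OPA 1.x moves its public Go packages under `/v1`, keeping the identifiers themselves unchanged. For most downstream code the migration is a path edit, sketched here (requires the `opa` module as a dependency):

```go
package main

// Before (OPA 0.x):
//   import "github.com/open-policy-agent/opa/ast"
// After (OPA 1.x) — same package API, new path:
import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	t := ast.StringTerm("keywords") // identical call on either path
	fmt.Println(t)
}
```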
diff --git a/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go b/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go index b1a5b71577f..814847a12a1 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go +++ b/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go @@ -12,12 +12,12 @@ import ( "path/filepath" "strings" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/bundle" storedversion "github.com/open-policy-agent/opa/internal/version" - "github.com/open-policy-agent/opa/loader" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/storage" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/bundle" + "github.com/open-policy-agent/opa/v1/loader" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/storage" ) // InsertAndCompileOptions contains the input for the operation. @@ -53,6 +53,7 @@ func InsertAndCompile(ctx context.Context, opts InsertAndCompileOptions) (*Inser } compiler := ast.NewCompiler(). + WithDefaultRegoVersion(opts.ParserOptions.RegoVersion). SetErrorLimit(opts.MaxErrors). WithPathConflictsCheck(storage.NonEmpty(ctx, opts.Store, opts.Txn)). WithEnablePrintStatements(opts.EnablePrintStatements) diff --git a/vendor/github.com/open-policy-agent/opa/internal/strings/strings.go b/vendor/github.com/open-policy-agent/opa/internal/strings/strings.go index 08f3bf9182e..f2838ac36aa 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/strings/strings.go +++ b/vendor/github.com/open-policy-agent/opa/internal/strings/strings.go @@ -57,7 +57,7 @@ func TruncateFilePaths(maxIdealWidth, maxWidth int, path ...string) (map[string] } // Drop the overall length down to match our substitution - longestLocation = longestLocation - (len(lcs) - 3) + longestLocation -= (len(lcs) - 3) } return result, longestLocation diff --git a/vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go b/vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go index 1fc07f68c36..3b12d9526b6 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go +++ b/vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go @@ -31,7 +31,7 @@ var ErrNotList = errors.New("not a list") // MaxIndex is the maximum index that will be allowed by setIndex. // The default value 65536 = 1024 * 64 -var MaxIndex = 65536 +const MaxIndex = 65536 // ToYAML takes a string of arguments and converts to a YAML document. func ToYAML(s string) (string, error) { @@ -148,8 +148,6 @@ func (t *parser) key(data map[string]interface{}) error { return err } return fmt.Errorf("key %q has no value", string(k)) - //set(data, string(k), "") - //return err case last == '[': // We are in a list index context, so we need to set an index. i, err := t.keyIndex() @@ -168,7 +166,7 @@ func (t *parser) key(data map[string]interface{}) error { set(data, kk, list) return err case last == '=': - //End of key. Consume =, Get value. + // End of key. Consume =, Get value. 
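`InsertAndCompile` now threads the Rego version into the compiler via `WithDefaultRegoVersion`, which matters in 1.x because Rego v1 syntax is the default. A hedged sketch of the same option chain; the module source and error limit are illustrative, not from the patch:

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	compiler := ast.NewCompiler().
		WithDefaultRegoVersion(ast.RegoV1). // as added in init.go
		SetErrorLimit(10)

	modules := map[string]*ast.Module{
		"example.rego": ast.MustParseModule(
			"package example\n\nallow if input.user == \"admin\"\n"),
	}
	compiler.Compile(modules)
	fmt.Println("failed:", compiler.Failed())
}
```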
// FIXME: Get value list first vl, e := t.valList() switch e { diff --git a/vendor/github.com/open-policy-agent/opa/internal/version/version.go b/vendor/github.com/open-policy-agent/opa/internal/version/version.go index 1c2e9ecd012..dc52733fc28 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/version/version.go +++ b/vendor/github.com/open-policy-agent/opa/internal/version/version.go @@ -10,8 +10,8 @@ import ( "fmt" "runtime" - "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/version" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/version" ) var versionPath = storage.MustParsePath("/system/version") diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go index 35e6059c72e..0695ce94fe7 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go +++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go @@ -7,6 +7,7 @@ package encoding import ( "bytes" "encoding/binary" + "errors" "fmt" "io" @@ -105,7 +106,7 @@ func readMagic(r io.Reader) error { if err := binary.Read(r, binary.LittleEndian, &v); err != nil { return err } else if v != constant.Magic { - return fmt.Errorf("illegal magic value") + return errors.New("illegal magic value") } return nil } @@ -115,7 +116,7 @@ func readVersion(r io.Reader) error { if err := binary.Read(r, binary.LittleEndian, &v); err != nil { return err } else if v != constant.Version { - return fmt.Errorf("illegal wasm version") + return errors.New("illegal wasm version") } return nil } @@ -199,7 +200,7 @@ func readSections(r io.Reader, m *module.Module) error { return fmt.Errorf("code section: %w", err) } default: - return fmt.Errorf("illegal section id") + return errors.New("illegal section id") } } } @@ -269,7 +270,7 @@ func readNameMap(r io.Reader) ([]module.NameMap, error) { return nil, err } nm := make([]module.NameMap, n) - for i := uint32(0); i < n; i++ { + for i := range n { var name string id, err := leb128.ReadVarUint32(r) if err != nil { @@ -289,7 +290,7 @@ func readNameSectionLocals(r io.Reader, s *module.NameSection) error { if err != nil { return err } - for i := uint32(0); i < n; i++ { + for range n { id, err := leb128.ReadVarUint32(r) // func index if err != nil { return err @@ -326,7 +327,7 @@ func readTypeSection(r io.Reader, s *module.TypeSection) error { return err } - for i := uint32(0); i < n; i++ { + for range n { var ftype module.FunctionType if err := readFunctionType(r, &ftype); err != nil { @@ -346,7 +347,7 @@ func readImportSection(r io.Reader, s *module.ImportSection) error { return err } - for i := uint32(0); i < n; i++ { + for range n { var imp module.Import @@ -367,14 +368,14 @@ func readTableSection(r io.Reader, s *module.TableSection) error { return err } - for i := uint32(0); i < n; i++ { + for range n { var table module.Table if elem, err := readByte(r); err != nil { return err } else if elem != constant.ElementTypeAnyFunc { - return fmt.Errorf("illegal element type") + return errors.New("illegal element type") } table.Type = types.Anyfunc @@ -396,7 +397,7 @@ func readMemorySection(r io.Reader, s *module.MemorySection) error { return err } - for i := uint32(0); i < n; i++ { + for range n { var mem module.Memory @@ -417,7 +418,7 @@ func readGlobalSection(r io.Reader, s *module.GlobalSection) error { return err } - for i := uint32(0); i < n; i++ { + for range n { var global module.Global @@ -442,7 
+443,7 @@ func readExportSection(r io.Reader, s *module.ExportSection) error { return err } - for i := uint32(0); i < n; i++ { + for range n { var exp module.Export @@ -463,7 +464,7 @@ func readElementSection(r io.Reader, s *module.ElementSection) error { return err } - for i := uint32(0); i < n; i++ { + for range n { var seg module.ElementSegment @@ -484,7 +485,7 @@ func readDataSection(r io.Reader, s *module.DataSection) error { return err } - for i := uint32(0); i < n; i++ { + for range n { var seg module.DataSegment @@ -505,7 +506,7 @@ func readRawCodeSection(r io.Reader, s *module.RawCodeSection) error { return err } - for i := uint32(0); i < n; i++ { + for range n { var seg module.RawCodeSegment if err := readRawCodeSegment(r, &seg); err != nil { @@ -547,7 +548,7 @@ func readGlobal(r io.Reader, global *module.Global) error { if b == 1 { global.Mutable = true } else if b != 0 { - return fmt.Errorf("illegal mutability flag") + return errors.New("illegal mutability flag") } return readConstantExpr(r, &global.Init) @@ -584,7 +585,7 @@ func readImport(r io.Reader, imp *module.Import) error { if elem, err := readByte(r); err != nil { return err } else if elem != constant.ElementTypeAnyFunc { - return fmt.Errorf("illegal element type") + return errors.New("illegal element type") } desc := module.TableImport{ Type: types.Anyfunc, @@ -617,12 +618,12 @@ func readImport(r io.Reader, imp *module.Import) error { if b == 1 { desc.Mutable = true } else if b != 0 { - return fmt.Errorf("illegal mutability flag") + return errors.New("illegal mutability flag") } return nil } - return fmt.Errorf("illegal import descriptor type") + return errors.New("illegal import descriptor type") } func readExport(r io.Reader, exp *module.Export) error { @@ -646,7 +647,7 @@ func readExport(r io.Reader, exp *module.Export) error { case constant.ExportDescGlobal: exp.Descriptor.Type = module.GlobalExportType default: - return fmt.Errorf("illegal export descriptor type") + return errors.New("illegal export descriptor type") } exp.Descriptor.Index, err = leb128.ReadVarUint32(r) @@ -727,7 +728,7 @@ func readExpr(r io.Reader, expr *module.Expr) (err error) { case error: err = r default: - err = fmt.Errorf("unknown panic") + err = errors.New("unknown panic") } } }() @@ -809,21 +810,21 @@ func readLimits(r io.Reader, l *module.Limit) error { return err } - min, err := leb128.ReadVarUint32(r) + minLim, err := leb128.ReadVarUint32(r) if err != nil { return err } - l.Min = min + l.Min = minLim if b == 1 { - max, err := leb128.ReadVarUint32(r) + maxLim, err := leb128.ReadVarUint32(r) if err != nil { return err } - l.Max = &max + l.Max = &maxLim } else if b != 0 { - return fmt.Errorf("illegal limit flag") + return errors.New("illegal limit flag") } return nil @@ -838,7 +839,7 @@ func readLocals(r io.Reader, locals *[]module.LocalDeclaration) error { ret := make([]module.LocalDeclaration, n) - for i := uint32(0); i < n; i++ { + for i := range n { if err := readVarUint32(r, &ret[i].Count); err != nil { return err } @@ -888,7 +889,7 @@ func readVarUint32Vector(r io.Reader, v *[]uint32) error { ret := make([]uint32, n) - for i := uint32(0); i < n; i++ { + for i := range n { if err := readVarUint32(r, &ret[i]); err != nil { return err } @@ -907,7 +908,7 @@ func readValueTypeVector(r io.Reader, v *[]types.ValueType) error { ret := make([]types.ValueType, n) - for i := uint32(0); i < n; i++ { + for i := range n { if err := readValueType(r, &ret[i]); err != nil { return err } diff --git 
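`readLimits` renames its `min`/`max` locals to `minLim`/`maxLim`, presumably because `min` and `max` have been predeclared builtins since Go 1.21; shadowing them is legal but confusing and flagged by common linters:

```go
package main

import "fmt"

type limit struct {
	Min uint32
	Max *uint32
}

func readLimits(vals [2]uint32) limit {
	// Calling these min/max would shadow the Go 1.21 builtins for the
	// rest of the function body.
	minLim, maxLim := vals[0], vals[1]
	fmt.Println("builtin still usable:", min(minLim, maxLim))
	return limit{Min: minLim, Max: &maxLim}
}

func main() {
	l := readLimits([2]uint32{1, 8})
	fmt.Println(l.Min, *l.Max)
}
```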
a/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/writer.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/writer.go index 6917b8d1d1a..19df3bd6e64 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/writer.go +++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/writer.go @@ -7,6 +7,7 @@ package encoding import ( "bytes" "encoding/binary" + "errors" "fmt" "io" "math" @@ -260,7 +261,7 @@ func writeTableSection(w io.Writer, s module.TableSection) error { return err } default: - return fmt.Errorf("illegal table element type") + return errors.New("illegal table element type") } if err := writeLimits(&buf, table.Lim); err != nil { return err @@ -588,7 +589,7 @@ func writeImport(w io.Writer, imp module.Import) error { } return writeByte(w, constant.Const) default: - return fmt.Errorf("illegal import descriptor type") + return errors.New("illegal import descriptor type") } } diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/module/module.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/module/module.go index 913863c10cb..033d429c892 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/wasm/module/module.go +++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/module/module.go @@ -288,7 +288,7 @@ func (x ExportDescriptorType) String() string { } // Kind returns the function import type kind. -func (i FunctionImport) Kind() ImportDescriptorType { +func (FunctionImport) Kind() ImportDescriptorType { return FunctionImportType } @@ -297,7 +297,7 @@ func (i FunctionImport) String() string { } // Kind returns the memory import type kind. -func (i MemoryImport) Kind() ImportDescriptorType { +func (MemoryImport) Kind() ImportDescriptorType { return MemoryImportType } @@ -306,7 +306,7 @@ func (i MemoryImport) String() string { } // Kind returns the table import type kind. -func (i TableImport) Kind() ImportDescriptorType { +func (TableImport) Kind() ImportDescriptorType { return TableImportType } @@ -315,7 +315,7 @@ func (i TableImport) String() string { } // Kind returns the global import type kind. -func (i GlobalImport) Kind() ImportDescriptorType { +func (GlobalImport) Kind() ImportDescriptorType { return GlobalImportType } diff --git a/vendor/github.com/open-policy-agent/opa/loader/doc.go b/vendor/github.com/open-policy-agent/opa/loader/doc.go new file mode 100644 index 00000000000..9f60920d955 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/loader/doc.go @@ -0,0 +1,8 @@ +// Copyright 2024 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. 
+package loader diff --git a/vendor/github.com/open-policy-agent/opa/loader/errors.go b/vendor/github.com/open-policy-agent/opa/loader/errors.go index b8aafb14210..8dc70b86738 100644 --- a/vendor/github.com/open-policy-agent/opa/loader/errors.go +++ b/vendor/github.com/open-policy-agent/opa/loader/errors.go @@ -5,58 +5,8 @@ package loader import ( - "fmt" - "strings" - - "github.com/open-policy-agent/opa/ast" + v1 "github.com/open-policy-agent/opa/v1/loader" ) // Errors is a wrapper for multiple loader errors. -type Errors []error - -func (e Errors) Error() string { - if len(e) == 0 { - return "no error(s)" - } - if len(e) == 1 { - return "1 error occurred during loading: " + e[0].Error() - } - buf := make([]string, len(e)) - for i := range buf { - buf[i] = e[i].Error() - } - return fmt.Sprintf("%v errors occurred during loading:\n", len(e)) + strings.Join(buf, "\n") -} - -func (e *Errors) add(err error) { - if errs, ok := err.(ast.Errors); ok { - for i := range errs { - *e = append(*e, errs[i]) - } - } else { - *e = append(*e, err) - } -} - -type unsupportedDocumentType string - -func (path unsupportedDocumentType) Error() string { - return string(path) + ": document must be of type object" -} - -type unrecognizedFile string - -func (path unrecognizedFile) Error() string { - return string(path) + ": can't recognize file type" -} - -func isUnrecognizedFile(err error) bool { - _, ok := err.(unrecognizedFile) - return ok -} - -type mergeError string - -func (e mergeError) Error() string { - return string(e) + ": merge error" -} +type Errors = v1.Errors diff --git a/vendor/github.com/open-policy-agent/opa/loader/loader.go b/vendor/github.com/open-policy-agent/opa/loader/loader.go index 461639ed19b..9b2f91d4e9f 100644 --- a/vendor/github.com/open-policy-agent/opa/loader/loader.go +++ b/vendor/github.com/open-policy-agent/opa/loader/loader.go @@ -6,478 +6,74 @@ package loader import ( - "bytes" - "fmt" - "io" "io/fs" "os" - "path/filepath" - "sort" "strings" - "sigs.k8s.io/yaml" - "github.com/open-policy-agent/opa/ast" - astJSON "github.com/open-policy-agent/opa/ast/json" "github.com/open-policy-agent/opa/bundle" - fileurl "github.com/open-policy-agent/opa/internal/file/url" - "github.com/open-policy-agent/opa/internal/merge" - "github.com/open-policy-agent/opa/loader/filter" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/storage/inmem" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/loader" ) // Result represents the result of successfully loading zero or more files. -type Result struct { - Documents map[string]interface{} - Modules map[string]*RegoFile - path []string -} - -// ParsedModules returns the parsed modules stored on the result. -func (l *Result) ParsedModules() map[string]*ast.Module { - modules := make(map[string]*ast.Module) - for _, module := range l.Modules { - modules[module.Name] = module.Parsed - } - return modules -} - -// Compiler returns a Compiler object with the compiled modules from this loader -// result. -func (l *Result) Compiler() (*ast.Compiler, error) { - compiler := ast.NewCompiler() - compiler.Compile(l.ParsedModules()) - if compiler.Failed() { - return nil, compiler.Errors - } - return compiler, nil -} - -// Store returns a Store object with the documents from this loader result. 
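`loader/errors.go` shrinks to `type Errors = v1.Errors`: a type alias, not a new type, so values flow between the legacy and `v1` packages without conversion. A self-contained sketch of why aliases make this shim free:

```go
package main

import "fmt"

type v1Errors []error // stands in for the v1 package's Errors type

// Errors is an alias: `type A = B` gives the same type a second name,
// so no conversion is ever needed at the package boundary.
type Errors = v1Errors

func fromV1(e v1Errors) Errors { return e } // compiles with no cast

func main() {
	errs := Errors{fmt.Errorf("boom")}
	fmt.Println(len(fromV1(errs)))
}
```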
-func (l *Result) Store() (storage.Store, error) { - return l.StoreWithOpts() -} - -// StoreWithOpts returns a Store object with the documents from this loader result, -// instantiated with the passed options. -func (l *Result) StoreWithOpts(opts ...inmem.Opt) (storage.Store, error) { - return inmem.NewFromObjectWithOpts(l.Documents, opts...), nil -} +type Result = v1.Result // RegoFile represents the result of loading a single Rego source file. -type RegoFile struct { - Name string - Parsed *ast.Module - Raw []byte -} +type RegoFile = v1.RegoFile // Filter defines the interface for filtering files during loading. If the // filter returns true, the file should be excluded from the result. -type Filter = filter.LoaderFilter +type Filter = v1.Filter // GlobExcludeName excludes files and directories whose names do not match the // shell style pattern at minDepth or greater. func GlobExcludeName(pattern string, minDepth int) Filter { - return func(_ string, info fs.FileInfo, depth int) bool { - match, _ := filepath.Match(pattern, info.Name()) - return match && depth >= minDepth - } + return v1.GlobExcludeName(pattern, minDepth) } // FileLoader defines an interface for loading OPA data files // and Rego policies. -type FileLoader interface { - All(paths []string) (*Result, error) - Filtered(paths []string, filter Filter) (*Result, error) - AsBundle(path string) (*bundle.Bundle, error) - WithReader(io.Reader) FileLoader - WithFS(fs.FS) FileLoader - WithMetrics(metrics.Metrics) FileLoader - WithFilter(Filter) FileLoader - WithBundleVerificationConfig(*bundle.VerificationConfig) FileLoader - WithSkipBundleVerification(bool) FileLoader - WithProcessAnnotation(bool) FileLoader - WithCapabilities(*ast.Capabilities) FileLoader - WithJSONOptions(*astJSON.Options) FileLoader - WithRegoVersion(ast.RegoVersion) FileLoader - WithFollowSymlinks(bool) FileLoader -} +type FileLoader = v1.FileLoader // NewFileLoader returns a new FileLoader instance. func NewFileLoader() FileLoader { - return &fileLoader{ - metrics: metrics.New(), - files: make(map[string]bundle.FileInfo), - } -} - -type fileLoader struct { - metrics metrics.Metrics - filter Filter - bvc *bundle.VerificationConfig - skipVerify bool - files map[string]bundle.FileInfo - opts ast.ParserOptions - fsys fs.FS - reader io.Reader - followSymlinks bool -} - -// WithFS provides an fs.FS to use for loading files. You can pass nil to -// use plain IO calls (e.g. os.Open, os.Stat, etc.), this is the default -// behaviour. -func (fl *fileLoader) WithFS(fsys fs.FS) FileLoader { - fl.fsys = fsys - return fl -} - -// WithReader provides an io.Reader to use for loading the bundle tarball. -// An io.Reader passed via WithReader takes precedence over an fs.FS passed -// via WithFS. 
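WithFS, documented just above, pairs naturally with Go's embed support; a sketch loading policies from an embedded filesystem (the embed directive and directory name are assumptions):

package main

import (
	"embed"
	"fmt"

	"github.com/open-policy-agent/opa/loader"
)

//go:embed policies
var policiesFS embed.FS

func main() {
	// Read everything under the embedded "policies" tree instead of
	// touching the host filesystem.
	result, err := loader.NewFileLoader().WithFS(policiesFS).Filtered([]string{"policies"}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(result.Modules), "modules loaded")
}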
-func (fl *fileLoader) WithReader(rdr io.Reader) FileLoader { - fl.reader = rdr - return fl -} - -// WithMetrics provides the metrics instance to use while loading -func (fl *fileLoader) WithMetrics(m metrics.Metrics) FileLoader { - fl.metrics = m - return fl -} - -// WithFilter specifies the filter object to use to filter files while loading -func (fl *fileLoader) WithFilter(filter Filter) FileLoader { - fl.filter = filter - return fl -} - -// WithBundleVerificationConfig sets the key configuration used to verify a signed bundle -func (fl *fileLoader) WithBundleVerificationConfig(config *bundle.VerificationConfig) FileLoader { - fl.bvc = config - return fl -} - -// WithSkipBundleVerification skips verification of a signed bundle -func (fl *fileLoader) WithSkipBundleVerification(skipVerify bool) FileLoader { - fl.skipVerify = skipVerify - return fl -} - -// WithProcessAnnotation enables or disables processing of schema annotations on rules -func (fl *fileLoader) WithProcessAnnotation(processAnnotation bool) FileLoader { - fl.opts.ProcessAnnotation = processAnnotation - return fl -} - -// WithCapabilities sets the supported capabilities when loading the files -func (fl *fileLoader) WithCapabilities(caps *ast.Capabilities) FileLoader { - fl.opts.Capabilities = caps - return fl -} - -// WithJSONOptions sets the JSONOptions for use when parsing files -func (fl *fileLoader) WithJSONOptions(opts *astJSON.Options) FileLoader { - fl.opts.JSONOptions = opts - return fl -} - -// WithRegoVersion sets the ast.RegoVersion to use when parsing and compiling modules. -func (fl *fileLoader) WithRegoVersion(version ast.RegoVersion) FileLoader { - fl.opts.RegoVersion = version - return fl -} - -// WithFollowSymlinks enables or disables following symlinks when loading files -func (fl *fileLoader) WithFollowSymlinks(followSymlinks bool) FileLoader { - fl.followSymlinks = followSymlinks - return fl -} - -// All returns a Result object loaded (recursively) from the specified paths. -func (fl fileLoader) All(paths []string) (*Result, error) { - return fl.Filtered(paths, nil) -} - -// Filtered returns a Result object loaded (recursively) from the specified -// paths while applying the given filters. If any filter returns true, the -// file/directory is excluded. -func (fl fileLoader) Filtered(paths []string, filter Filter) (*Result, error) { - return all(fl.fsys, paths, filter, func(curr *Result, path string, depth int) error { - - var ( - bs []byte - err error - ) - if fl.fsys != nil { - bs, err = fs.ReadFile(fl.fsys, path) - } else { - bs, err = os.ReadFile(path) - } - if err != nil { - return err - } - - result, err := loadKnownTypes(path, bs, fl.metrics, fl.opts) - if err != nil { - if !isUnrecognizedFile(err) { - return err - } - if depth > 0 { - return nil - } - result, err = loadFileForAnyType(path, bs, fl.metrics, fl.opts) - if err != nil { - return err - } - } - - return curr.merge(path, result) - }) -} - -// AsBundle loads a path as a bundle. If it is a single file -// it will be treated as a normal tarball bundle. If a directory -// is supplied it will be loaded as an unzipped bundle tree. 
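As the AsBundle comment above describes, the loader switches on the path type; a sketch exercising both forms (paths are hypothetical):

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/loader"
)

func main() {
	// A tarball path is read as a gzipped bundle archive...
	tarball, err := loader.NewFileLoader().AsBundle("bundle.tar.gz")
	if err != nil {
		panic(err)
	}
	// ...while a directory is walked as an unzipped bundle tree.
	tree, err := loader.NewFileLoader().AsBundle("bundle-dir")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(tarball.Modules), len(tree.Modules))
}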
-func (fl fileLoader) AsBundle(path string) (*bundle.Bundle, error) { - path, err := fileurl.Clean(path) - if err != nil { - return nil, err - } - - if err := checkForUNCPath(path); err != nil { - return nil, err - } - - var bundleLoader bundle.DirectoryLoader - var isDir bool - if fl.reader != nil { - bundleLoader = bundle.NewTarballLoaderWithBaseURL(fl.reader, path).WithFilter(fl.filter) - } else { - bundleLoader, isDir, err = GetBundleDirectoryLoaderFS(fl.fsys, path, fl.filter) - } - - if err != nil { - return nil, err - } - bundleLoader = bundleLoader.WithFollowSymlinks(fl.followSymlinks) - - br := bundle.NewCustomReader(bundleLoader). - WithMetrics(fl.metrics). - WithBundleVerificationConfig(fl.bvc). - WithSkipBundleVerification(fl.skipVerify). - WithProcessAnnotations(fl.opts.ProcessAnnotation). - WithCapabilities(fl.opts.Capabilities). - WithJSONOptions(fl.opts.JSONOptions). - WithFollowSymlinks(fl.followSymlinks). - WithRegoVersion(fl.opts.RegoVersion) - - // For bundle directories add the full path in front of module file names - // to simplify debugging. - if isDir { - br.WithBaseDir(path) - } - - b, err := br.Read() - if err != nil { - err = fmt.Errorf("bundle %s: %w", path, err) - } - - return &b, err + return v1.NewFileLoader().WithRegoVersion(ast.DefaultRegoVersion) } // GetBundleDirectoryLoader returns a bundle directory loader which can be used to load // files in the directory func GetBundleDirectoryLoader(path string) (bundle.DirectoryLoader, bool, error) { - return GetBundleDirectoryLoaderFS(nil, path, nil) + return v1.GetBundleDirectoryLoader(path) } // GetBundleDirectoryLoaderWithFilter returns a bundle directory loader which can be used to load // files in the directory after applying the given filter. func GetBundleDirectoryLoaderWithFilter(path string, filter Filter) (bundle.DirectoryLoader, bool, error) { - return GetBundleDirectoryLoaderFS(nil, path, filter) + return v1.GetBundleDirectoryLoaderWithFilter(path, filter) } // GetBundleDirectoryLoaderFS returns a bundle directory loader which can be used to load // files in the directory. func GetBundleDirectoryLoaderFS(fsys fs.FS, path string, filter Filter) (bundle.DirectoryLoader, bool, error) { - path, err := fileurl.Clean(path) - if err != nil { - return nil, false, err - } - - if err := checkForUNCPath(path); err != nil { - return nil, false, err - } - - var fi fs.FileInfo - if fsys != nil { - fi, err = fs.Stat(fsys, path) - } else { - fi, err = os.Stat(path) - } - if err != nil { - return nil, false, fmt.Errorf("error reading %q: %s", path, err) - } - - var bundleLoader bundle.DirectoryLoader - if fi.IsDir() { - if fsys != nil { - bundleLoader = bundle.NewFSLoaderWithRoot(fsys, path) - } else { - bundleLoader = bundle.NewDirectoryLoader(path) - } - } else { - var fh fs.File - if fsys != nil { - fh, err = fsys.Open(path) - } else { - fh, err = os.Open(path) - } - if err != nil { - return nil, false, err - } - bundleLoader = bundle.NewTarballLoaderWithBaseURL(fh, path) - } - - if filter != nil { - bundleLoader = bundleLoader.WithFilter(filter) - } - return bundleLoader, fi.IsDir(), nil + return v1.GetBundleDirectoryLoaderFS(fsys, path, filter) } // FilteredPaths is the same as FilteredPathsFS using the current directory file // system func FilteredPaths(paths []string, filter Filter) ([]string, error) { - return FilteredPathsFS(nil, paths, filter) + return v1.FilteredPaths(paths, filter) } // FilteredPathsFS returns a list of files from the specified // paths while applying the given filters.
If any filter returns true, the // file/directory is excluded. func FilteredPathsFS(fsys fs.FS, paths []string, filter Filter) ([]string, error) { - result := []string{} - - _, err := all(fsys, paths, filter, func(_ *Result, path string, _ int) error { - result = append(result, path) - return nil - }) - if err != nil { - return nil, err - } - return result, nil + return v1.FilteredPathsFS(fsys, paths, filter) } // Schemas loads a schema set from the specified file path. func Schemas(schemaPath string) (*ast.SchemaSet, error) { - - var errs Errors - ss, err := loadSchemas(schemaPath) - if err != nil { - errs.add(err) - return nil, errs - } - - return ss, nil -} - -func loadSchemas(schemaPath string) (*ast.SchemaSet, error) { - - if schemaPath == "" { - return nil, nil - } - - ss := ast.NewSchemaSet() - path, err := fileurl.Clean(schemaPath) - if err != nil { - return nil, err - } - - info, err := os.Stat(path) - if err != nil { - return nil, err - } - - // Handle single file case. - if !info.IsDir() { - schema, err := loadOneSchema(path) - if err != nil { - return nil, err - } - ss.Put(ast.SchemaRootRef, schema) - return ss, nil - - } - - // Handle directory case. - rootDir := path - - err = filepath.Walk(path, - func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } else if info.IsDir() { - return nil - } - - schema, err := loadOneSchema(path) - if err != nil { - return err - } - - relPath, err := filepath.Rel(rootDir, path) - if err != nil { - return err - } - - key := getSchemaSetByPathKey(relPath) - ss.Put(key, schema) - return nil - }) - - if err != nil { - return nil, err - } - - return ss, nil -} - -func getSchemaSetByPathKey(path string) ast.Ref { - - front := filepath.Dir(path) - last := strings.TrimSuffix(filepath.Base(path), filepath.Ext(path)) - - var parts []string - - if front != "." { - parts = append(strings.Split(filepath.ToSlash(front), "/"), last) - } else { - parts = []string{last} - } - - key := make(ast.Ref, 1+len(parts)) - key[0] = ast.SchemaRootDocument - for i := range parts { - key[i+1] = ast.StringTerm(parts[i]) - } - - return key -} - -func loadOneSchema(path string) (interface{}, error) { - bs, err := os.ReadFile(path) - if err != nil { - return nil, err - } - - var schema interface{} - if err := util.Unmarshal(bs, &schema); err != nil { - return nil, fmt.Errorf("%s: %w", path, err) - } - - return schema, nil + return v1.Schemas(schemaPath) } // All returns a Result object loaded (recursively) from the specified paths. @@ -517,321 +113,33 @@ func Rego(path string) (*RegoFile, error) { // RegoWithOpts returns a RegoFile object loaded from the given path. func RegoWithOpts(path string, opts ast.ParserOptions) (*RegoFile, error) { - path, err := fileurl.Clean(path) - if err != nil { - return nil, err + if opts.RegoVersion == ast.RegoUndefined { + opts.RegoVersion = ast.DefaultRegoVersion } - bs, err := os.ReadFile(path) - if err != nil { - return nil, err - } - return loadRego(path, bs, metrics.New(), opts) + + return v1.RegoWithOpts(path, opts) } // CleanPath returns the normalized version of a path that can be used as an identifier. func CleanPath(path string) string { - return strings.Trim(path, "/") + return v1.CleanPath(path) } // Paths returns a sorted list of files contained at path. If recurse is true // and path is a directory, then Paths will walk the directory structure // recursively and list files at each level. 
func Paths(path string, recurse bool) (paths []string, err error) { - path, err = fileurl.Clean(path) - if err != nil { - return nil, err - } - err = filepath.Walk(path, func(f string, _ os.FileInfo, _ error) error { - if !recurse { - if path != f && path != filepath.Dir(f) { - return filepath.SkipDir - } - } - paths = append(paths, f) - return nil - }) - return paths, err + return v1.Paths(path, recurse) } // Dirs resolves filepaths to directories. It will return a list of unique // directories. func Dirs(paths []string) []string { - unique := map[string]struct{}{} - - for _, path := range paths { - // TODO: /dir/dir will register top level directory /dir - dir := filepath.Dir(path) - unique[dir] = struct{}{} - } - - u := make([]string, 0, len(unique)) - for k := range unique { - u = append(u, k) - } - sort.Strings(u) - return u + return v1.Dirs(paths) } // SplitPrefix returns a tuple specifying the document prefix and the file // path. func SplitPrefix(path string) ([]string, string) { - // Non-prefixed URLs can be returned without modification and their contents - // can be rooted directly under data. - if strings.Index(path, "://") == strings.Index(path, ":") { - return nil, path - } - parts := strings.SplitN(path, ":", 2) - if len(parts) == 2 && len(parts[0]) > 0 { - return strings.Split(parts[0], "."), parts[1] - } - return nil, path -} - -func (l *Result) merge(path string, result interface{}) error { - switch result := result.(type) { - case bundle.Bundle: - for _, module := range result.Modules { - l.Modules[module.Path] = &RegoFile{ - Name: module.Path, - Parsed: module.Parsed, - Raw: module.Raw, - } - } - return l.mergeDocument(path, result.Data) - case *RegoFile: - l.Modules[CleanPath(path)] = result - return nil - default: - return l.mergeDocument(path, result) - } -} - -func (l *Result) mergeDocument(path string, doc interface{}) error { - obj, ok := makeDir(l.path, doc) - if !ok { - return unsupportedDocumentType(path) - } - merged, ok := merge.InterfaceMaps(l.Documents, obj) - if !ok { - return mergeError(path) - } - for k := range merged { - l.Documents[k] = merged[k] - } - return nil -} - -func (l *Result) withParent(p string) *Result { - path := append(l.path, p) - return &Result{ - Documents: l.Documents, - Modules: l.Modules, - path: path, - } -} - -func newResult() *Result { - return &Result{ - Documents: map[string]interface{}{}, - Modules: map[string]*RegoFile{}, - } -} - -func all(fsys fs.FS, paths []string, filter Filter, f func(*Result, string, int) error) (*Result, error) { - errs := Errors{} - root := newResult() - - for _, path := range paths { - - // Paths can be prefixed with a string that specifies where content should be - // loaded under data. E.g., foo.bar:/path/to/some.json will load the content - // of some.json under {"foo": {"bar": ...}}. 
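The prefix convention described in the comment above can be checked directly with SplitPrefix, which the shim now forwards to v1:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/loader"
)

func main() {
	// A dotted prefix roots the document under data.foo.bar.
	prefix, path := loader.SplitPrefix("foo.bar:/path/to/some.json")
	fmt.Println(prefix, path) // [foo bar] /path/to/some.json

	// Paths without a prefix come back unchanged.
	prefix, path = loader.SplitPrefix("/path/to/some.json")
	fmt.Println(prefix, path) // [] /path/to/some.json
}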
- loaded := root - prefix, path := SplitPrefix(path) - if len(prefix) > 0 { - for _, part := range prefix { - loaded = loaded.withParent(part) - } - } - - allRec(fsys, path, filter, &errs, loaded, 0, f) - } - - if len(errs) > 0 { - return nil, errs - } - - return root, nil -} - -func allRec(fsys fs.FS, path string, filter Filter, errors *Errors, loaded *Result, depth int, f func(*Result, string, int) error) { - - path, err := fileurl.Clean(path) - if err != nil { - errors.add(err) - return - } - - if err := checkForUNCPath(path); err != nil { - errors.add(err) - return - } - - var info fs.FileInfo - if fsys != nil { - info, err = fs.Stat(fsys, path) - } else { - info, err = os.Stat(path) - } - - if err != nil { - errors.add(err) - return - } - - if filter != nil && filter(path, info, depth) { - return - } - - if !info.IsDir() { - if err := f(loaded, path, depth); err != nil { - errors.add(err) - } - return - } - - // If we are recursing on directories then content must be loaded under path - // specified by directory hierarchy. - if depth > 0 { - loaded = loaded.withParent(info.Name()) - } - - var files []fs.DirEntry - if fsys != nil { - files, err = fs.ReadDir(fsys, path) - } else { - files, err = os.ReadDir(path) - } - if err != nil { - errors.add(err) - return - } - - for _, file := range files { - allRec(fsys, filepath.Join(path, file.Name()), filter, errors, loaded, depth+1, f) - } -} - -func loadKnownTypes(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (interface{}, error) { - switch filepath.Ext(path) { - case ".json": - return loadJSON(path, bs, m) - case ".rego": - return loadRego(path, bs, m, opts) - case ".yaml", ".yml": - return loadYAML(path, bs, m) - default: - if strings.HasSuffix(path, ".tar.gz") { - r, err := loadBundleFile(path, bs, m, opts) - if err != nil { - err = fmt.Errorf("bundle %s: %w", path, err) - } - return r, err - } - } - return nil, unrecognizedFile(path) -} - -func loadFileForAnyType(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (interface{}, error) { - module, err := loadRego(path, bs, m, opts) - if err == nil { - return module, nil - } - doc, err := loadJSON(path, bs, m) - if err == nil { - return doc, nil - } - doc, err = loadYAML(path, bs, m) - if err == nil { - return doc, nil - } - return nil, unrecognizedFile(path) -} - -func loadBundleFile(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (bundle.Bundle, error) { - tl := bundle.NewTarballLoaderWithBaseURL(bytes.NewBuffer(bs), path) - br := bundle.NewCustomReader(tl). - WithRegoVersion(opts.RegoVersion). - WithJSONOptions(opts.JSONOptions). - WithProcessAnnotations(opts.ProcessAnnotation). - WithMetrics(m). - WithSkipBundleVerification(true). 
- IncludeManifestInData(true) - return br.Read() -} - -func loadRego(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (*RegoFile, error) { - m.Timer(metrics.RegoModuleParse).Start() - var module *ast.Module - var err error - module, err = ast.ParseModuleWithOpts(path, string(bs), opts) - m.Timer(metrics.RegoModuleParse).Stop() - if err != nil { - return nil, err - } - result := &RegoFile{ - Name: path, - Parsed: module, - Raw: bs, - } - return result, nil -} - -func loadJSON(path string, bs []byte, m metrics.Metrics) (interface{}, error) { - m.Timer(metrics.RegoDataParse).Start() - var x interface{} - err := util.UnmarshalJSON(bs, &x) - m.Timer(metrics.RegoDataParse).Stop() - - if err != nil { - return nil, fmt.Errorf("%s: %w", path, err) - } - return x, nil -} - -func loadYAML(path string, bs []byte, m metrics.Metrics) (interface{}, error) { - m.Timer(metrics.RegoDataParse).Start() - bs, err := yaml.YAMLToJSON(bs) - m.Timer(metrics.RegoDataParse).Stop() - if err != nil { - return nil, fmt.Errorf("%v: error converting YAML to JSON: %v", path, err) - } - return loadJSON(path, bs, m) -} - -func makeDir(path []string, x interface{}) (map[string]interface{}, bool) { - if len(path) == 0 { - obj, ok := x.(map[string]interface{}) - if !ok { - return nil, false - } - return obj, true - } - return makeDir(path[:len(path)-1], map[string]interface{}{path[len(path)-1]: x}) -} - -// isUNC reports whether path is a UNC path. -func isUNC(path string) bool { - return len(path) > 1 && isSlash(path[0]) && isSlash(path[1]) -} - -func isSlash(c uint8) bool { - return c == '\\' || c == '/' -} - -func checkForUNCPath(path string) error { - if isUNC(path) { - return fmt.Errorf("UNC path read is not allowed: %s", path) - } - return nil + return v1.SplitPrefix(path) } diff --git a/vendor/github.com/open-policy-agent/opa/rego/doc.go b/vendor/github.com/open-policy-agent/opa/rego/doc.go new file mode 100644 index 00000000000..febe75696c5 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/rego/doc.go @@ -0,0 +1,8 @@ +// Copyright 2024 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. +package rego diff --git a/vendor/github.com/open-policy-agent/opa/rego/errors.go b/vendor/github.com/open-policy-agent/opa/rego/errors.go index dcc5e2679d1..bcbd2efeddf 100644 --- a/vendor/github.com/open-policy-agent/opa/rego/errors.go +++ b/vendor/github.com/open-policy-agent/opa/rego/errors.go @@ -1,24 +1,17 @@ package rego +import v1 "github.com/open-policy-agent/opa/v1/rego" + // HaltError is an error type to return from a custom function implementation // that will abort the evaluation process (analogous to topdown.Halt). -type HaltError struct { - err error -} - -// Error delegates to the wrapped error -func (h *HaltError) Error() string { - return h.err.Error() -} +type HaltError = v1.HaltError // NewHaltError wraps an error such that the evaluation process will stop // when it occurs. 
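A sketch of how a custom built-in might use NewHaltError to abort the whole evaluation rather than merely making one expression undefined (the built-in name and failure mode are hypothetical):

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/rego"
	"github.com/open-policy-agent/opa/types"
)

func main() {
	r := rego.New(
		rego.Query(`x := remote.lookup("key")`),
		rego.Function1(
			&rego.Function{Name: "remote.lookup", Decl: types.NewFunction(types.Args(types.S), types.S)},
			func(_ rego.BuiltinContext, _ *ast.Term) (*ast.Term, error) {
				// Halting stops evaluation instead of leaving x undefined.
				return nil, rego.NewHaltError(errors.New("backend unavailable"))
			},
		),
	)
	if _, err := r.Eval(context.Background()); err != nil {
		fmt.Println(err) // evaluation was aborted by the halt
	}
}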
func NewHaltError(err error) error { - return &HaltError{err: err} + return v1.NewHaltError(err) } // ErrorDetails interface is satisfied by an error that provides further // details. -type ErrorDetails interface { - Lines() []string -} +type ErrorDetails = v1.ErrorDetails diff --git a/vendor/github.com/open-policy-agent/opa/rego/plugins.go b/vendor/github.com/open-policy-agent/opa/rego/plugins.go index abaa910341e..38ef84416fb 100644 --- a/vendor/github.com/open-policy-agent/opa/rego/plugins.go +++ b/vendor/github.com/open-policy-agent/opa/rego/plugins.go @@ -5,39 +5,13 @@ package rego import ( - "context" - "sync" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/ir" + v1 "github.com/open-policy-agent/opa/v1/rego" ) -var targetPlugins = map[string]TargetPlugin{} -var pluginMtx sync.Mutex - -type TargetPlugin interface { - IsTarget(string) bool - PrepareForEval(context.Context, *ir.Policy, ...PrepareOption) (TargetPluginEval, error) -} - -type TargetPluginEval interface { - Eval(context.Context, *EvalContext, ast.Value) (ast.Value, error) -} +type TargetPlugin = v1.TargetPlugin -func (r *Rego) targetPlugin(tgt string) TargetPlugin { - for _, p := range targetPlugins { - if p.IsTarget(tgt) { - return p - } - } - return nil -} +type TargetPluginEval = v1.TargetPluginEval func RegisterPlugin(name string, p TargetPlugin) { - pluginMtx.Lock() - defer pluginMtx.Unlock() - if _, ok := targetPlugins[name]; ok { - panic("plugin already registered " + name) - } - targetPlugins[name] = p + v1.RegisterPlugin(name, p) } diff --git a/vendor/github.com/open-policy-agent/opa/rego/rego.go b/vendor/github.com/open-policy-agent/opa/rego/rego.go index 64b4b9b93e1..e6af30c39c5 100644 --- a/vendor/github.com/open-policy-agent/opa/rego/rego.go +++ b/vendor/github.com/open-policy-agent/opa/rego/rego.go @@ -6,958 +6,367 @@ package rego import ( - "bytes" - "context" - "errors" - "fmt" "io" - "strings" "time" "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/bundle" - bundleUtils "github.com/open-policy-agent/opa/internal/bundle" - "github.com/open-policy-agent/opa/internal/compiler/wasm" - "github.com/open-policy-agent/opa/internal/future" - "github.com/open-policy-agent/opa/internal/planner" - "github.com/open-policy-agent/opa/internal/rego/opa" - "github.com/open-policy-agent/opa/internal/wasm/encoding" - "github.com/open-policy-agent/opa/ir" "github.com/open-policy-agent/opa/loader" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/plugins" - "github.com/open-policy-agent/opa/resolver" "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/storage/inmem" - "github.com/open-policy-agent/opa/topdown" - "github.com/open-policy-agent/opa/topdown/builtins" - "github.com/open-policy-agent/opa/topdown/cache" - "github.com/open-policy-agent/opa/topdown/print" - "github.com/open-policy-agent/opa/tracing" - "github.com/open-policy-agent/opa/types" - "github.com/open-policy-agent/opa/util" -) - -const ( - defaultPartialNamespace = "partial" - wasmVarPrefix = "^" -) - -// nolint: deadcode,varcheck -const ( - targetWasm = "wasm" - targetRego = "rego" + "github.com/open-policy-agent/opa/v1/metrics" + v1 "github.com/open-policy-agent/opa/v1/rego" + "github.com/open-policy-agent/opa/v1/resolver" + "github.com/open-policy-agent/opa/v1/topdown" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/cache" + "github.com/open-policy-agent/opa/v1/topdown/print" + 
"github.com/open-policy-agent/opa/v1/tracing" ) // CompileResult represents the result of compiling a Rego query, zero or more // Rego modules, and arbitrary contextual data into an executable. -type CompileResult struct { - Bytes []byte `json:"bytes"` -} +type CompileResult = v1.CompileResult // PartialQueries contains the queries and support modules produced by partial // evaluation. -type PartialQueries struct { - Queries []ast.Body `json:"queries,omitempty"` - Support []*ast.Module `json:"modules,omitempty"` -} +type PartialQueries = v1.PartialQueries // PartialResult represents the result of partial evaluation. The result can be // used to generate a new query that can be run when inputs are known. -type PartialResult struct { - compiler *ast.Compiler - store storage.Store - body ast.Body - builtinDecls map[string]*ast.Builtin - builtinFuncs map[string]*topdown.Builtin -} - -// Rego returns an object that can be evaluated to produce a query result. -func (pr PartialResult) Rego(options ...func(*Rego)) *Rego { - options = append(options, Compiler(pr.compiler), Store(pr.store), ParsedQuery(pr.body)) - r := New(options...) - - // Propagate any custom builtins. - for k, v := range pr.builtinDecls { - r.builtinDecls[k] = v - } - for k, v := range pr.builtinFuncs { - r.builtinFuncs[k] = v - } - return r -} - -// preparedQuery is a wrapper around a Rego object which has pre-processed -// state stored on it. Once prepared there are a more limited number of actions -// that can be taken with it. It will, however, be able to evaluate faster since -// it will not have to re-parse or compile as much. -type preparedQuery struct { - r *Rego - cfg *PrepareConfig -} +type PartialResult = v1.PartialResult // EvalContext defines the set of options allowed to be set at evaluation // time. Any other options will need to be set on a new Rego object. 
-type EvalContext struct { - hasInput bool - time time.Time - seed io.Reader - rawInput *interface{} - parsedInput ast.Value - metrics metrics.Metrics - txn storage.Transaction - instrument bool - instrumentation *topdown.Instrumentation - partialNamespace string - queryTracers []topdown.QueryTracer - compiledQuery compiledQuery - unknowns []string - disableInlining []ast.Ref - parsedUnknowns []*ast.Term - indexing bool - earlyExit bool - interQueryBuiltinCache cache.InterQueryCache - interQueryBuiltinValueCache cache.InterQueryValueCache - ndBuiltinCache builtins.NDBCache - resolvers []refResolver - sortSets bool - copyMaps bool - printHook print.Hook - capabilities *ast.Capabilities - strictBuiltinErrors bool - virtualCache topdown.VirtualCache -} - -func (e *EvalContext) RawInput() *interface{} { - return e.rawInput -} - -func (e *EvalContext) ParsedInput() ast.Value { - return e.parsedInput -} - -func (e *EvalContext) Time() time.Time { - return e.time -} - -func (e *EvalContext) Seed() io.Reader { - return e.seed -} - -func (e *EvalContext) InterQueryBuiltinCache() cache.InterQueryCache { - return e.interQueryBuiltinCache -} - -func (e *EvalContext) InterQueryBuiltinValueCache() cache.InterQueryValueCache { - return e.interQueryBuiltinValueCache -} - -func (e *EvalContext) PrintHook() print.Hook { - return e.printHook -} - -func (e *EvalContext) Metrics() metrics.Metrics { - return e.metrics -} - -func (e *EvalContext) StrictBuiltinErrors() bool { - return e.strictBuiltinErrors -} - -func (e *EvalContext) NDBCache() builtins.NDBCache { - return e.ndBuiltinCache -} - -func (e *EvalContext) CompiledQuery() ast.Body { - return e.compiledQuery.query -} - -func (e *EvalContext) Capabilities() *ast.Capabilities { - return e.capabilities -} - -func (e *EvalContext) Transaction() storage.Transaction { - return e.txn -} +type EvalContext = v1.EvalContext // EvalOption defines a function to set an option on an EvalConfig -type EvalOption func(*EvalContext) +type EvalOption = v1.EvalOption // EvalInput configures the input for a Prepared Query's evaluation func EvalInput(input interface{}) EvalOption { - return func(e *EvalContext) { - e.rawInput = &input - e.hasInput = true - } + return v1.EvalInput(input) } // EvalParsedInput configures the input for a Prepared Query's evaluation func EvalParsedInput(input ast.Value) EvalOption { - return func(e *EvalContext) { - e.parsedInput = input - e.hasInput = true - } + return v1.EvalParsedInput(input) } // EvalMetrics configures the metrics for a Prepared Query's evaluation func EvalMetrics(metric metrics.Metrics) EvalOption { - return func(e *EvalContext) { - e.metrics = metric - } + return v1.EvalMetrics(metric) } // EvalTransaction configures the Transaction for a Prepared Query's evaluation func EvalTransaction(txn storage.Transaction) EvalOption { - return func(e *EvalContext) { - e.txn = txn - } + return v1.EvalTransaction(txn) } // EvalInstrument enables or disables instrumenting for a Prepared Query's evaluation func EvalInstrument(instrument bool) EvalOption { - return func(e *EvalContext) { - e.instrument = instrument - } + return v1.EvalInstrument(instrument) } // EvalTracer configures a tracer for a Prepared Query's evaluation // Deprecated: Use EvalQueryTracer instead. 
func EvalTracer(tracer topdown.Tracer) EvalOption { - return func(e *EvalContext) { - if tracer != nil { - e.queryTracers = append(e.queryTracers, topdown.WrapLegacyTracer(tracer)) - } - } + return v1.EvalTracer(tracer) } // EvalQueryTracer configures a tracer for a Prepared Query's evaluation func EvalQueryTracer(tracer topdown.QueryTracer) EvalOption { - return func(e *EvalContext) { - if tracer != nil { - e.queryTracers = append(e.queryTracers, tracer) - } - } + return v1.EvalQueryTracer(tracer) } // EvalPartialNamespace returns an argument that sets the namespace to use for // partial evaluation results. The namespace must be a valid package path // component. func EvalPartialNamespace(ns string) EvalOption { - return func(e *EvalContext) { - e.partialNamespace = ns - } + return v1.EvalPartialNamespace(ns) } // EvalUnknowns returns an argument that sets the values to treat as // unknown during partial evaluation. func EvalUnknowns(unknowns []string) EvalOption { - return func(e *EvalContext) { - e.unknowns = unknowns - } + return v1.EvalUnknowns(unknowns) } // EvalDisableInlining returns an argument that adds a set of paths to exclude from // partial evaluation inlining. func EvalDisableInlining(paths []ast.Ref) EvalOption { - return func(e *EvalContext) { - e.disableInlining = paths - } + return v1.EvalDisableInlining(paths) } // EvalParsedUnknowns returns an argument that sets the values to treat // as unknown during partial evaluation. func EvalParsedUnknowns(unknowns []*ast.Term) EvalOption { - return func(e *EvalContext) { - e.parsedUnknowns = unknowns - } + return v1.EvalParsedUnknowns(unknowns) } // EvalRuleIndexing will disable indexing optimizations for the // evaluation. This should only be used when tracing in debug mode. func EvalRuleIndexing(enabled bool) EvalOption { - return func(e *EvalContext) { - e.indexing = enabled - } + return v1.EvalRuleIndexing(enabled) } // EvalEarlyExit will disable 'early exit' optimizations for the // evaluation. This should only be used when tracing in debug mode. func EvalEarlyExit(enabled bool) EvalOption { - return func(e *EvalContext) { - e.earlyExit = enabled - } + return v1.EvalEarlyExit(enabled) } // EvalTime sets the wall clock time to use during policy evaluation. // time.now_ns() calls will return this value. func EvalTime(x time.Time) EvalOption { - return func(e *EvalContext) { - e.time = x - } + return v1.EvalTime(x) } // EvalSeed sets a reader that will seed randomization required by built-in functions. // If a seed is not provided crypto/rand.Reader is used. func EvalSeed(r io.Reader) EvalOption { - return func(e *EvalContext) { - e.seed = r - } + return v1.EvalSeed(r) } // EvalInterQueryBuiltinCache sets the inter-query cache that built-in functions can utilize // during evaluation. func EvalInterQueryBuiltinCache(c cache.InterQueryCache) EvalOption { - return func(e *EvalContext) { - e.interQueryBuiltinCache = c - } + return v1.EvalInterQueryBuiltinCache(c) } // EvalInterQueryBuiltinValueCache sets the inter-query value cache that built-in functions can utilize // during evaluation. func EvalInterQueryBuiltinValueCache(c cache.InterQueryValueCache) EvalOption { - return func(e *EvalContext) { - e.interQueryBuiltinValueCache = c - } + return v1.EvalInterQueryBuiltinValueCache(c) } // EvalNDBuiltinCache sets the non-deterministic builtin cache that built-in functions can // use during evaluation. 
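Reusing one inter-query cache across evaluations is the typical use of the cache options above; a sketch (a nil config takes the defaults; the http.send query is illustrative):

package main

import (
	"context"

	"github.com/open-policy-agent/opa/rego"
	"github.com/open-policy-agent/opa/topdown/cache"
)

func main() {
	ctx := context.Background()
	pq, err := rego.New(
		rego.Query(`resp := http.send({"method": "get", "url": "https://example.com", "cache": true})`),
	).PrepareForEval(ctx)
	if err != nil {
		panic(err)
	}
	// One cache shared across Eval calls lets cacheable http.send
	// responses survive between queries.
	c := cache.NewInterQueryCache(nil)
	_, _ = pq.Eval(ctx, rego.EvalInterQueryBuiltinCache(c))
	_, _ = pq.Eval(ctx, rego.EvalInterQueryBuiltinCache(c)) // may be served from cache
}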
func EvalNDBuiltinCache(c builtins.NDBCache) EvalOption { - return func(e *EvalContext) { - e.ndBuiltinCache = c - } + return v1.EvalNDBuiltinCache(c) } // EvalResolver sets a Resolver for a specified ref path for this evaluation. func EvalResolver(ref ast.Ref, r resolver.Resolver) EvalOption { - return func(e *EvalContext) { - e.resolvers = append(e.resolvers, refResolver{ref, r}) - } + return v1.EvalResolver(ref, r) } // EvalSortSets causes the evaluator to sort sets before returning them as JSON arrays. func EvalSortSets(yes bool) EvalOption { - return func(e *EvalContext) { - e.sortSets = yes - } + return v1.EvalSortSets(yes) } // EvalCopyMaps causes the evaluator to copy `map[string]interface{}`s before returning them. func EvalCopyMaps(yes bool) EvalOption { - return func(e *EvalContext) { - e.copyMaps = yes - } + return v1.EvalCopyMaps(yes) } // EvalPrintHook sets the object to use for handling print statement outputs. func EvalPrintHook(ph print.Hook) EvalOption { - return func(e *EvalContext) { - e.printHook = ph - } + return v1.EvalPrintHook(ph) } // EvalVirtualCache sets the topdown.VirtualCache to use for evaluation. This is // optional, and if not set, the default cache is used. func EvalVirtualCache(vc topdown.VirtualCache) EvalOption { - return func(e *EvalContext) { - e.virtualCache = vc - } -} - -func (pq preparedQuery) Modules() map[string]*ast.Module { - mods := make(map[string]*ast.Module) - - for name, mod := range pq.r.parsedModules { - mods[name] = mod - } - - for _, b := range pq.r.bundles { - for _, mod := range b.Modules { - mods[mod.Path] = mod.Parsed - } - } - - return mods -} - -// newEvalContext creates a new EvalContext overlaying any EvalOptions over top -// the Rego object on the preparedQuery. The returned function should be called -// once the evaluation is complete to close any transactions that might have -// been opened. -func (pq preparedQuery) newEvalContext(ctx context.Context, options []EvalOption) (*EvalContext, func(context.Context), error) { - ectx := &EvalContext{ - hasInput: false, - rawInput: nil, - parsedInput: nil, - metrics: nil, - txn: nil, - instrument: false, - instrumentation: nil, - partialNamespace: pq.r.partialNamespace, - queryTracers: nil, - unknowns: pq.r.unknowns, - parsedUnknowns: pq.r.parsedUnknowns, - compiledQuery: compiledQuery{}, - indexing: true, - earlyExit: true, - resolvers: pq.r.resolvers, - printHook: pq.r.printHook, - capabilities: pq.r.capabilities, - strictBuiltinErrors: pq.r.strictBuiltinErrors, - } - - for _, o := range options { - o(ectx) - } - - if ectx.metrics == nil { - ectx.metrics = metrics.New() - } - - if ectx.instrument { - ectx.instrumentation = topdown.NewInstrumentation(ectx.metrics) - } - - // Default to an empty "finish" function - finishFunc := func(context.Context) {} - - var err error - ectx.disableInlining, err = parseStringsToRefs(pq.r.disableInlining) - if err != nil { - return nil, finishFunc, err - } - - if ectx.txn == nil { - ectx.txn, err = pq.r.store.NewTransaction(ctx) - if err != nil { - return nil, finishFunc, err - } - finishFunc = func(ctx context.Context) { - pq.r.store.Abort(ctx, ectx.txn) - } - } - - // If we didn't get an input specified in the Eval options - // then fall back to the Rego object's input fields. 
- if !ectx.hasInput { - ectx.rawInput = pq.r.rawInput - ectx.parsedInput = pq.r.parsedInput - } - - if ectx.parsedInput == nil { - if ectx.rawInput == nil { - // Fall back to the original Rego objects input if none was specified - // Note that it could still be nil - ectx.rawInput = pq.r.rawInput - } - - if pq.r.targetPlugin(pq.r.target) == nil && // no plugin claims this target - pq.r.target != targetWasm { - ectx.parsedInput, err = pq.r.parseRawInput(ectx.rawInput, ectx.metrics) - if err != nil { - return nil, finishFunc, err - } - } - } - - return ectx, finishFunc, nil + return v1.EvalVirtualCache(vc) } // PreparedEvalQuery holds the prepared Rego state that has been pre-processed // for subsequent evaluations. -type PreparedEvalQuery struct { - preparedQuery -} - -// Eval evaluates this PartialResult's Rego object with additional eval options -// and returns a ResultSet. -// If options are provided they will override the original Rego options respective value. -// The original Rego object transaction will *not* be re-used. A new transaction will be opened -// if one is not provided with an EvalOption. -func (pq PreparedEvalQuery) Eval(ctx context.Context, options ...EvalOption) (ResultSet, error) { - ectx, finish, err := pq.newEvalContext(ctx, options) - if err != nil { - return nil, err - } - defer finish(ctx) - - ectx.compiledQuery = pq.r.compiledQueries[evalQueryType] - - return pq.r.eval(ctx, ectx) -} +type PreparedEvalQuery = v1.PreparedEvalQuery // PreparedPartialQuery holds the prepared Rego state that has been pre-processed // for partial evaluations. -type PreparedPartialQuery struct { - preparedQuery -} - -// Partial runs partial evaluation on the prepared query and returns the result. -// The original Rego object transaction will *not* be re-used. A new transaction will be opened -// if one is not provided with an EvalOption. -func (pq PreparedPartialQuery) Partial(ctx context.Context, options ...EvalOption) (*PartialQueries, error) { - ectx, finish, err := pq.newEvalContext(ctx, options) - if err != nil { - return nil, err - } - defer finish(ctx) - - ectx.compiledQuery = pq.r.compiledQueries[partialQueryType] - - return pq.r.partial(ctx, ectx) -} +type PreparedPartialQuery = v1.PreparedPartialQuery // Errors represents a collection of errors returned when evaluating Rego. -type Errors []error - -func (errs Errors) Error() string { - if len(errs) == 0 { - return "no error" - } - if len(errs) == 1 { - return fmt.Sprintf("1 error occurred: %v", errs[0].Error()) - } - buf := []string{fmt.Sprintf("%v errors occurred", len(errs))} - for _, err := range errs { - buf = append(buf, err.Error()) - } - return strings.Join(buf, "\n") -} - -var errPartialEvaluationNotEffective = errors.New("partial evaluation not effective") +type Errors = v1.Errors // IsPartialEvaluationNotEffectiveErr returns true if err is an error returned by // this package to indicate that partial evaluation was ineffective. func IsPartialEvaluationNotEffectiveErr(err error) bool { - errs, ok := err.(Errors) - if !ok { - return false - } - return len(errs) == 1 && errs[0] == errPartialEvaluationNotEffective -} - -type compiledQuery struct { - query ast.Body - compiler ast.QueryCompiler -} - -type queryType int - -// Define a query type for each of the top level Rego -// API's that compile queries differently. 
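The partial-evaluation surface retained above is driven like this (policy and unknowns are hypothetical):

package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/rego"
)

func main() {
	ctx := context.Background()
	r := rego.New(
		rego.Query("data.authz.allow == true"),
		rego.Module("authz.rego", "package authz\n\nallow { input.role == \"admin\" }"),
		rego.Unknowns([]string{"input"}),
	)
	// With input unknown, the result is a set of residual queries that can
	// be evaluated later, once input is available.
	pq, err := r.Partial(ctx)
	if err != nil {
		panic(err)
	}
	for _, q := range pq.Queries {
		fmt.Println(q) // e.g. input.role = "admin"
	}
}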
-const ( - evalQueryType queryType = iota - partialResultQueryType - partialQueryType - compileQueryType -) - -type loadPaths struct { - paths []string - filter loader.Filter + return v1.IsPartialEvaluationNotEffectiveErr(err) } // Rego constructs a query and can be evaluated to obtain results. -type Rego struct { - query string - parsedQuery ast.Body - compiledQueries map[queryType]compiledQuery - pkg string - parsedPackage *ast.Package - imports []string - parsedImports []*ast.Import - rawInput *interface{} - parsedInput ast.Value - unknowns []string - parsedUnknowns []*ast.Term - disableInlining []string - shallowInlining bool - skipPartialNamespace bool - partialNamespace string - modules []rawModule - parsedModules map[string]*ast.Module - compiler *ast.Compiler - store storage.Store - ownStore bool - ownStoreReadAst bool - txn storage.Transaction - metrics metrics.Metrics - queryTracers []topdown.QueryTracer - tracebuf *topdown.BufferTracer - trace bool - instrumentation *topdown.Instrumentation - instrument bool - capture map[*ast.Expr]ast.Var // map exprs to generated capture vars - termVarID int - dump io.Writer - runtime *ast.Term - time time.Time - seed io.Reader - capabilities *ast.Capabilities - builtinDecls map[string]*ast.Builtin - builtinFuncs map[string]*topdown.Builtin - unsafeBuiltins map[string]struct{} - loadPaths loadPaths - bundlePaths []string - bundles map[string]*bundle.Bundle - skipBundleVerification bool - interQueryBuiltinCache cache.InterQueryCache - interQueryBuiltinValueCache cache.InterQueryValueCache - ndBuiltinCache builtins.NDBCache - strictBuiltinErrors bool - builtinErrorList *[]topdown.Error - resolvers []refResolver - schemaSet *ast.SchemaSet - target string // target type (wasm, rego, etc.) - opa opa.EvalEngine - generateJSON func(*ast.Term, *EvalContext) (interface{}, error) - printHook print.Hook - enablePrintStatements bool - distributedTacingOpts tracing.Options - strict bool - pluginMgr *plugins.Manager - plugins []TargetPlugin - targetPrepState TargetPluginEval - regoVersion ast.RegoVersion -} +type Rego = v1.Rego // Function represents a built-in function that is callable in Rego. -type Function struct { - Name string - Description string - Decl *types.Function - Memoize bool - Nondeterministic bool -} +type Function = v1.Function // BuiltinContext contains additional attributes from the evaluator that // built-in functions can use, e.g., the request context.Context, caches, etc. -type BuiltinContext = topdown.BuiltinContext +type BuiltinContext = v1.BuiltinContext type ( // Builtin1 defines a built-in function that accepts 1 argument. - Builtin1 func(bctx BuiltinContext, op1 *ast.Term) (*ast.Term, error) + Builtin1 = v1.Builtin1 // Builtin2 defines a built-in function that accepts 2 arguments. - Builtin2 func(bctx BuiltinContext, op1, op2 *ast.Term) (*ast.Term, error) + Builtin2 = v1.Builtin2 // Builtin3 defines a built-in function that accepts 3 arguments. - Builtin3 func(bctx BuiltinContext, op1, op2, op3 *ast.Term) (*ast.Term, error) + Builtin3 = v1.Builtin3 // Builtin4 defines a built-in function that accepts 4 arguments. - Builtin4 func(bctx BuiltinContext, op1, op2, op3, op4 *ast.Term) (*ast.Term, error) + Builtin4 = v1.Builtin4 // BuiltinDyn defines a built-in function that accepts a list of arguments. - BuiltinDyn func(bctx BuiltinContext, terms []*ast.Term) (*ast.Term, error) + BuiltinDyn = v1.BuiltinDyn ) // RegisterBuiltin1 adds a built-in function globally inside the OPA runtime.
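The global registration helpers above follow OPA's long-standing pattern; a sketch of a one-argument built-in (the name and behaviour are illustrative), with Memoize enabled so repeated calls inside a query hit the cache described further below:

package main

import (
	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/rego"
	"github.com/open-policy-agent/opa/types"
)

func main() {
	rego.RegisterBuiltin1(
		&rego.Function{
			Name:    "hello",
			Decl:    types.NewFunction(types.Args(types.S), types.S),
			Memoize: true, // cache results by stringified arguments within a query
		},
		func(_ rego.BuiltinContext, a *ast.Term) (*ast.Term, error) {
			if s, ok := a.Value.(ast.String); ok {
				return ast.StringTerm("hello, " + string(s)), nil
			}
			return nil, nil
		},
	)
}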
func RegisterBuiltin1(decl *Function, impl Builtin1) { - ast.RegisterBuiltin(&ast.Builtin{ - Name: decl.Name, - Description: decl.Description, - Decl: decl.Decl, - Nondeterministic: decl.Nondeterministic, - }) - topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0]) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + v1.RegisterBuiltin1(decl, impl) } // RegisterBuiltin2 adds a built-in function globally inside the OPA runtime. func RegisterBuiltin2(decl *Function, impl Builtin2) { - ast.RegisterBuiltin(&ast.Builtin{ - Name: decl.Name, - Description: decl.Description, - Decl: decl.Decl, - Nondeterministic: decl.Nondeterministic, - }) - topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1]) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + v1.RegisterBuiltin2(decl, impl) } // RegisterBuiltin3 adds a built-in function globally inside the OPA runtime. func RegisterBuiltin3(decl *Function, impl Builtin3) { - ast.RegisterBuiltin(&ast.Builtin{ - Name: decl.Name, - Description: decl.Description, - Decl: decl.Decl, - Nondeterministic: decl.Nondeterministic, - }) - topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1], terms[2]) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + v1.RegisterBuiltin3(decl, impl) } // RegisterBuiltin4 adds a built-in function globally inside the OPA runtime. func RegisterBuiltin4(decl *Function, impl Builtin4) { - ast.RegisterBuiltin(&ast.Builtin{ - Name: decl.Name, - Description: decl.Description, - Decl: decl.Decl, - Nondeterministic: decl.Nondeterministic, - }) - topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1], terms[2], terms[3]) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + v1.RegisterBuiltin4(decl, impl) } // RegisterBuiltinDyn adds a built-in function globally inside the OPA runtime. func RegisterBuiltinDyn(decl *Function, impl BuiltinDyn) { - ast.RegisterBuiltin(&ast.Builtin{ - Name: decl.Name, - Description: decl.Description, - Decl: decl.Decl, - Nondeterministic: decl.Nondeterministic, - }) - topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + v1.RegisterBuiltinDyn(decl, impl) } // Function1 returns an option that adds a built-in function to the Rego object. 
func Function1(decl *Function, f Builtin1) func(*Rego) { - return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0]) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + return v1.Function1(decl, f) } // Function2 returns an option that adds a built-in function to the Rego object. func Function2(decl *Function, f Builtin2) func(*Rego) { - return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1]) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + return v1.Function2(decl, f) } // Function3 returns an option that adds a built-in function to the Rego object. func Function3(decl *Function, f Builtin3) func(*Rego) { - return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1], terms[2]) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + return v1.Function3(decl, f) } // Function4 returns an option that adds a built-in function to the Rego object. func Function4(decl *Function, f Builtin4) func(*Rego) { - return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1], terms[2], terms[3]) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + return v1.Function4(decl, f) } // FunctionDyn returns an option that adds a built-in function to the Rego object. func FunctionDyn(decl *Function, f BuiltinDyn) func(*Rego) { - return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + return v1.FunctionDyn(decl, f) } // FunctionDecl returns an option that adds a custom-built-in function // __declaration__. NO implementation is provided. This is used for // non-interpreter execution envs (e.g., Wasm). func FunctionDecl(decl *Function) func(*Rego) { - return newDecl(decl) -} - -func newDecl(decl *Function) func(*Rego) { - return func(r *Rego) { - r.builtinDecls[decl.Name] = &ast.Builtin{ - Name: decl.Name, - Decl: decl.Decl, - } - } -} - -type memo struct { - term *ast.Term - err error -} - -type memokey string - -func memoize(decl *Function, bctx BuiltinContext, terms []*ast.Term, ifEmpty func() (*ast.Term, error)) (*ast.Term, error) { - - if !decl.Memoize { - return ifEmpty() - } - - // NOTE(tsandall): we assume memoization is applied to infrequent built-in - // calls that do things like fetch data from remote locations. As such, - // converting the terms to strings is acceptable for now. - var b strings.Builder - if _, err := b.WriteString(decl.Name); err != nil { - return nil, err - } - - // The term slice _may_ include an output term depending on how the caller - // referred to the built-in function. Only use the arguments as the cache - // key. Unification ensures we don't get false positive matches. 
- for i := 0; i < len(decl.Decl.Args()); i++ { - if _, err := b.WriteString(terms[i].String()); err != nil { - return nil, err - } - } - - key := memokey(b.String()) - hit, ok := bctx.Cache.Get(key) - var m memo - if ok { - m = hit.(memo) - } else { - m.term, m.err = ifEmpty() - bctx.Cache.Put(key, m) - } - - return m.term, m.err + return v1.FunctionDecl(decl) } // Dump returns an argument that sets the writer to dump debugging information to. func Dump(w io.Writer) func(r *Rego) { - return func(r *Rego) { - r.dump = w - } + return v1.Dump(w) } // Query returns an argument that sets the Rego query. func Query(q string) func(r *Rego) { - return func(r *Rego) { - r.query = q - } + return v1.Query(q) } // ParsedQuery returns an argument that sets the Rego query. func ParsedQuery(q ast.Body) func(r *Rego) { - return func(r *Rego) { - r.parsedQuery = q - } + return v1.ParsedQuery(q) } // Package returns an argument that sets the Rego package on the query's // context. func Package(p string) func(r *Rego) { - return func(r *Rego) { - r.pkg = p - } + return v1.Package(p) } // ParsedPackage returns an argument that sets the Rego package on the query's // context. func ParsedPackage(pkg *ast.Package) func(r *Rego) { - return func(r *Rego) { - r.parsedPackage = pkg - } + return v1.ParsedPackage(pkg) } // Imports returns an argument that adds a Rego import to the query's context. func Imports(p []string) func(r *Rego) { - return func(r *Rego) { - r.imports = append(r.imports, p...) - } + return v1.Imports(p) } // ParsedImports returns an argument that adds Rego imports to the query's // context. func ParsedImports(imp []*ast.Import) func(r *Rego) { - return func(r *Rego) { - r.parsedImports = append(r.parsedImports, imp...) - } + return v1.ParsedImports(imp) } // Input returns an argument that sets the Rego input document. Input should be // a native Go value representing the input document. func Input(x interface{}) func(r *Rego) { - return func(r *Rego) { - r.rawInput = &x - } + return v1.Input(x) } // ParsedInput returns an argument that sets the Rego input document. func ParsedInput(x ast.Value) func(r *Rego) { - return func(r *Rego) { - r.parsedInput = x - } + return v1.ParsedInput(x) } // Unknowns returns an argument that sets the values to treat as unknown during // partial evaluation. func Unknowns(unknowns []string) func(r *Rego) { - return func(r *Rego) { - r.unknowns = unknowns - } + return v1.Unknowns(unknowns) } // ParsedUnknowns returns an argument that sets the values to treat as unknown // during partial evaluation. func ParsedUnknowns(unknowns []*ast.Term) func(r *Rego) { - return func(r *Rego) { - r.parsedUnknowns = unknowns - } + return v1.ParsedUnknowns(unknowns) } // DisableInlining adds a set of paths to exclude from partial evaluation inlining. func DisableInlining(paths []string) func(r *Rego) { - return func(r *Rego) { - r.disableInlining = paths - } + return v1.DisableInlining(paths) } // ShallowInlining prevents rules that depend on unknown values from being inlined. // Rules that only depend on known values are inlined. func ShallowInlining(yes bool) func(r *Rego) { - return func(r *Rego) { - r.shallowInlining = yes - } + return v1.ShallowInlining(yes) } // SkipPartialNamespace disables namespacing of partial evaluation results for support // rules generated from policy. Synthetic support rules are still namespaced.
func SkipPartialNamespace(yes bool) func(r *Rego) { - return func(r *Rego) { - r.skipPartialNamespace = yes - } + return v1.SkipPartialNamespace(yes) } // PartialNamespace returns an argument that sets the namespace to use for // partial evaluation results. The namespace must be a valid package path // component. func PartialNamespace(ns string) func(r *Rego) { - return func(r *Rego) { - r.partialNamespace = ns - } + return v1.PartialNamespace(ns) } // Module returns an argument that adds a Rego module. func Module(filename, input string) func(r *Rego) { - return func(r *Rego) { - r.modules = append(r.modules, rawModule{ - filename: filename, - module: input, - }) - } + return v1.Module(filename, input) } // ParsedModule returns an argument that adds a parsed Rego module. If a string // module with the same filename name is added, it will override the parsed // module. func ParsedModule(module *ast.Module) func(*Rego) { - return func(r *Rego) { - var filename string - if module.Package.Location != nil { - filename = module.Package.Location.File - } else { - filename = fmt.Sprintf("module_%p.rego", module) - } - r.parsedModules[filename] = module - } + return v1.ParsedModule(module) } // Load returns an argument that adds a filesystem path to load data @@ -968,9 +377,7 @@ func ParsedModule(module *ast.Module) func(*Rego) { // The Load option can only be used once. // Note: Loading files will require a write transaction on the store. func Load(paths []string, filter loader.Filter) func(r *Rego) { - return func(r *Rego) { - r.loadPaths = loadPaths{paths, filter} - } + return v1.Load(paths, filter) } // LoadBundle returns an argument that adds a filesystem path to load @@ -978,23 +385,17 @@ func Load(paths []string, filter loader.Filter) func(r *Rego) { // to be loaded as a bundle. // Note: Loading bundles will require a write transaction on the store. func LoadBundle(path string) func(r *Rego) { - return func(r *Rego) { - r.bundlePaths = append(r.bundlePaths, path) - } + return v1.LoadBundle(path) } // ParsedBundle returns an argument that adds a bundle to be loaded. func ParsedBundle(name string, b *bundle.Bundle) func(r *Rego) { - return func(r *Rego) { - r.bundles[name] = b - } + return v1.ParsedBundle(name, b) } // Compiler returns an argument that sets the Rego compiler. func Compiler(c *ast.Compiler) func(r *Rego) { - return func(r *Rego) { - r.compiler = c - } + return v1.Compiler(c) } // Store returns an argument that sets the policy engine's data storage layer. @@ -1003,18 +404,14 @@ func Compiler(c *ast.Compiler) func(r *Rego) { // must also be provided via the Transaction() option. After loading files // or bundles the transaction should be aborted or committed. func Store(s storage.Store) func(r *Rego) { - return func(r *Rego) { - r.store = s - } + return v1.Store(s) } // StoreReadAST returns an argument that sets whether the store should eagerly convert data to AST values. // // Only applicable when no store has been set on the Rego object through the Store option. func StoreReadAST(enabled bool) func(r *Rego) { - return func(r *Rego) { - r.ownStoreReadAst = enabled - } + return v1.StoreReadAST(enabled) } // Transaction returns an argument that sets the transaction to use for storage @@ -1024,93 +421,65 @@ func StoreReadAST(enabled bool) func(r *Rego) { // Store() option. If using Load(), LoadBundle(), or ParsedBundle() options // the transaction will likely require write params. 
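A sketch of the write-transaction pattern the Transaction comment above calls for when combining Store with Load (paths are hypothetical):

package main

import (
	"context"

	"github.com/open-policy-agent/opa/rego"
	"github.com/open-policy-agent/opa/storage"
	"github.com/open-policy-agent/opa/storage/inmem"
)

func main() {
	ctx := context.Background()
	store := inmem.New()
	// Load() writes policies and data into the store, so the supplied
	// transaction must be opened with write params.
	txn, err := store.NewTransaction(ctx, storage.WriteParams)
	if err != nil {
		panic(err)
	}
	r := rego.New(
		rego.Query("data"),
		rego.Store(store),
		rego.Transaction(txn),
		rego.Load([]string{"policies"}, nil),
	)
	if _, err := r.Eval(ctx); err != nil {
		store.Abort(ctx, txn)
		panic(err)
	}
	if err := store.Commit(ctx, txn); err != nil {
		panic(err)
	}
}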
func Transaction(txn storage.Transaction) func(r *Rego) { - return func(r *Rego) { - r.txn = txn - } + return v1.Transaction(txn) } // Metrics returns an argument that sets the metrics collection. func Metrics(m metrics.Metrics) func(r *Rego) { - return func(r *Rego) { - r.metrics = m - } + return v1.Metrics(m) } // Instrument returns an argument that enables instrumentation for diagnosing // performance issues. func Instrument(yes bool) func(r *Rego) { - return func(r *Rego) { - r.instrument = yes - } + return v1.Instrument(yes) } // Trace returns an argument that enables tracing on r. func Trace(yes bool) func(r *Rego) { - return func(r *Rego) { - r.trace = yes - } + return v1.Trace(yes) } // Tracer returns an argument that adds a query tracer to r. // Deprecated: Use QueryTracer instead. func Tracer(t topdown.Tracer) func(r *Rego) { - return func(r *Rego) { - if t != nil { - r.queryTracers = append(r.queryTracers, topdown.WrapLegacyTracer(t)) - } - } + return v1.Tracer(t) } // QueryTracer returns an argument that adds a query tracer to r. func QueryTracer(t topdown.QueryTracer) func(r *Rego) { - return func(r *Rego) { - if t != nil { - r.queryTracers = append(r.queryTracers, t) - } - } + return v1.QueryTracer(t) } // Runtime returns an argument that sets the runtime data to provide to the // evaluation engine. func Runtime(term *ast.Term) func(r *Rego) { - return func(r *Rego) { - r.runtime = term - } + return v1.Runtime(term) } // Time sets the wall clock time to use during policy evaluation. Prepared queries // do not inherit this parameter. Use EvalTime to set the wall clock time when // executing a prepared query. func Time(x time.Time) func(r *Rego) { - return func(r *Rego) { - r.time = x - } + return v1.Time(x) } // Seed sets a reader that will seed randomization required by built-in functions. // If a seed is not provided crypto/rand.Reader is used. func Seed(r io.Reader) func(*Rego) { - return func(e *Rego) { - e.seed = r - } + return v1.Seed(r) } // PrintTrace is a helper function to write a human-readable version of the // trace to the writer w. func PrintTrace(w io.Writer, r *Rego) { - if r == nil || r.tracebuf == nil { - return - } - topdown.PrettyTrace(w, *r.tracebuf) + v1.PrintTrace(w, r) } // PrintTraceWithLocation is a helper function to write a human-readable version of the // trace to the writer w. func PrintTraceWithLocation(w io.Writer, r *Rego) { - if r == nil || r.tracebuf == nil { - return - } - topdown.PrettyTraceWithLocation(w, *r.tracebuf) + v1.PrintTraceWithLocation(w, r) } // UnsafeBuiltins sets the built-in functions to treat as unsafe and not allow. @@ -1118,104 +487,76 @@ func PrintTraceWithLocation(w io.Writer, r *Rego) { // compiler. This option is always honored for query compilation. Provide an // empty (non-nil) map to disable checks on queries. func UnsafeBuiltins(unsafeBuiltins map[string]struct{}) func(r *Rego) { - return func(r *Rego) { - r.unsafeBuiltins = unsafeBuiltins - } + return v1.UnsafeBuiltins(unsafeBuiltins) } // SkipBundleVerification skips verification of a signed bundle. func SkipBundleVerification(yes bool) func(r *Rego) { - return func(r *Rego) { - r.skipBundleVerification = yes - } + return v1.SkipBundleVerification(yes) } // InterQueryBuiltinCache sets the inter-query cache that built-in functions can utilize // during evaluation. 
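// One possible wiring of a cache shared across queries; a default caching
// config from the topdown/cache package is assumed here:
//
//	config, _ := cache.ParseCachingConfig(nil)
//	interQueryCache := cache.NewInterQueryCache(config)
//	r := rego.New(
//		rego.Query("data.example"),
//		rego.InterQueryBuiltinCache(interQueryCache),
//	)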
func InterQueryBuiltinCache(c cache.InterQueryCache) func(r *Rego) { - return func(r *Rego) { - r.interQueryBuiltinCache = c - } + return v1.InterQueryBuiltinCache(c) } // InterQueryBuiltinValueCache sets the inter-query value cache that built-in functions can utilize // during evaluation. func InterQueryBuiltinValueCache(c cache.InterQueryValueCache) func(r *Rego) { - return func(r *Rego) { - r.interQueryBuiltinValueCache = c - } + return v1.InterQueryBuiltinValueCache(c) } // NDBuiltinCache sets the non-deterministic builtins cache. func NDBuiltinCache(c builtins.NDBCache) func(r *Rego) { - return func(r *Rego) { - r.ndBuiltinCache = c - } + return v1.NDBuiltinCache(c) } // StrictBuiltinErrors tells the evaluator to treat all built-in function errors as fatal errors. func StrictBuiltinErrors(yes bool) func(r *Rego) { - return func(r *Rego) { - r.strictBuiltinErrors = yes - } + return v1.StrictBuiltinErrors(yes) } // BuiltinErrorList supplies an error slice to store built-in function errors. func BuiltinErrorList(list *[]topdown.Error) func(r *Rego) { - return func(r *Rego) { - r.builtinErrorList = list - } + return v1.BuiltinErrorList(list) } // Resolver sets a Resolver for a specified ref path. func Resolver(ref ast.Ref, r resolver.Resolver) func(r *Rego) { - return func(rego *Rego) { - rego.resolvers = append(rego.resolvers, refResolver{ref, r}) - } + return v1.Resolver(ref, r) } // Schemas sets the schemaSet. func Schemas(x *ast.SchemaSet) func(r *Rego) { - return func(r *Rego) { - r.schemaSet = x - } + return v1.Schemas(x) } // Capabilities configures the underlying compiler's capabilities. // This option is ignored for module compilation if the caller supplies the // compiler. func Capabilities(c *ast.Capabilities) func(r *Rego) { - return func(r *Rego) { - r.capabilities = c - } + return v1.Capabilities(c) } // Target sets the runtime to exercise. func Target(t string) func(r *Rego) { - return func(r *Rego) { - r.target = t - } + return v1.Target(t) } // GenerateJSON sets the AST to JSON converter for the results. func GenerateJSON(f func(*ast.Term, *EvalContext) (interface{}, error)) func(r *Rego) { - return func(r *Rego) { - r.generateJSON = f - } + return v1.GenerateJSON(f) } // PrintHook sets the object to use for handling print statement outputs. func PrintHook(h print.Hook) func(r *Rego) { - return func(r *Rego) { - r.printHook = h - } + return v1.PrintHook(h) } // DistributedTracingOpts sets the options to be used by distributed tracing. func DistributedTracingOpts(tr tracing.Options) func(r *Rego) { - return func(r *Rego) { - r.distributedTacingOpts = tr - } + return v1.DistributedTracingOpts(tr) } // EnablePrintStatements enables print() calls. If this option is not provided, @@ -1223,1667 +564,65 @@ func DistributedTracingOpts(tr tracing.Options) func(r *Rego) { // queries and policies that are passed as raw strings, i.e., this function will not // have any effect if the caller supplies the ast.Compiler instance. func EnablePrintStatements(yes bool) func(r *Rego) { - return func(r *Rego) { - r.enablePrintStatements = yes - } + return v1.EnablePrintStatements(yes) } // Strict enables or disables strict-mode in the compiler. func Strict(yes bool) func(r *Rego) { - return func(r *Rego) { - r.strict = yes - } + return v1.Strict(yes) } func SetRegoVersion(version ast.RegoVersion) func(r *Rego) { - return func(r *Rego) { - r.regoVersion = version - } + return v1.SetRegoVersion(version) } // New returns a new Rego object.
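// Typical construction and evaluation, sketched with an assumed query and
// input document:
//
//	r := rego.New(
//		rego.Query("data.example.allow"),
//		rego.Input(map[string]interface{}{"user": "alice"}),
//	)
//	rs, err := r.Eval(context.Background())
//	_, _ = rs, err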
func New(options ...func(r *Rego)) *Rego { - - r := &Rego{ - parsedModules: map[string]*ast.Module{}, - capture: map[*ast.Expr]ast.Var{}, - compiledQueries: map[queryType]compiledQuery{}, - builtinDecls: map[string]*ast.Builtin{}, - builtinFuncs: map[string]*topdown.Builtin{}, - bundles: map[string]*bundle.Bundle{}, - } - - for _, option := range options { - option(r) - } - - if r.compiler == nil { - r.compiler = ast.NewCompiler(). - WithUnsafeBuiltins(r.unsafeBuiltins). - WithBuiltins(r.builtinDecls). - WithDebug(r.dump). - WithSchemas(r.schemaSet). - WithCapabilities(r.capabilities). - WithEnablePrintStatements(r.enablePrintStatements). - WithStrict(r.strict). - WithUseTypeCheckAnnotations(true) - - // topdown could be target "" or "rego", but both could be overridden by - // a target plugin (checked below) - if r.target == targetWasm { - r.compiler = r.compiler.WithEvalMode(ast.EvalModeIR) + opts := make([]func(r *Rego), 0, len(options)+1) + opts = append(opts, options...) + opts = append(opts, func(r *Rego) { + if r.RegoVersion() == ast.RegoUndefined { + SetRegoVersion(ast.DefaultRegoVersion)(r) } - } - - if r.store == nil { - r.store = inmem.NewWithOpts(inmem.OptReturnASTValuesOnRead(r.ownStoreReadAst)) - r.ownStore = true - } else { - r.ownStore = false - } - - if r.metrics == nil { - r.metrics = metrics.New() - } - - if r.instrument { - r.instrumentation = topdown.NewInstrumentation(r.metrics) - r.compiler.WithMetrics(r.metrics) - } - - if r.trace { - r.tracebuf = topdown.NewBufferTracer() - r.queryTracers = append(r.queryTracers, r.tracebuf) - } - - if r.partialNamespace == "" { - r.partialNamespace = defaultPartialNamespace - } - - if r.generateJSON == nil { - r.generateJSON = generateJSON - } - - if r.pluginMgr != nil { - for _, name := range r.pluginMgr.Plugins() { - p := r.pluginMgr.Plugin(name) - if p0, ok := p.(TargetPlugin); ok { - r.plugins = append(r.plugins, p0) - } - } - } - - if t := r.targetPlugin(r.target); t != nil { - r.compiler = r.compiler.WithEvalMode(ast.EvalModeIR) - } - - return r -} - -// Eval evaluates this Rego object and returns a ResultSet. -func (r *Rego) Eval(ctx context.Context) (ResultSet, error) { - var err error - var txnClose transactionCloser - r.txn, txnClose, err = r.getTxn(ctx) - if err != nil { - return nil, err - } - - pq, err := r.PrepareForEval(ctx) - if err != nil { - _ = txnClose(ctx, err) // Ignore error - return nil, err - } - - evalArgs := []EvalOption{ - EvalTransaction(r.txn), - EvalMetrics(r.metrics), - EvalInstrument(r.instrument), - EvalTime(r.time), - EvalInterQueryBuiltinCache(r.interQueryBuiltinCache), - EvalInterQueryBuiltinValueCache(r.interQueryBuiltinValueCache), - EvalSeed(r.seed), - } - - if r.ndBuiltinCache != nil { - evalArgs = append(evalArgs, EvalNDBuiltinCache(r.ndBuiltinCache)) - } - - for _, qt := range r.queryTracers { - evalArgs = append(evalArgs, EvalQueryTracer(qt)) - } - - for i := range r.resolvers { - evalArgs = append(evalArgs, EvalResolver(r.resolvers[i].ref, r.resolvers[i].r)) - } - - rs, err := pq.Eval(ctx, evalArgs...) - txnErr := txnClose(ctx, err) // Always call closer - if err == nil { - err = txnErr - } - return rs, err -} - -// PartialEval has been deprecated and renamed to PartialResult. -func (r *Rego) PartialEval(ctx context.Context) (PartialResult, error) { - return r.PartialResult(ctx) -} - -// PartialResult partially evaluates this Rego object and returns a PartialResult. 
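// A sketch of the intended flow, with an assumed input document:
//
//	pr, err := r.PartialResult(ctx)
//	if err == nil {
//		rs, _ := pr.Rego(rego.Input(input)).Eval(ctx)
//		_ = rs
//	}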
-func (r *Rego) PartialResult(ctx context.Context) (PartialResult, error) { - var err error - var txnClose transactionCloser - r.txn, txnClose, err = r.getTxn(ctx) - if err != nil { - return PartialResult{}, err - } - - pq, err := r.PrepareForEval(ctx, WithPartialEval()) - txnErr := txnClose(ctx, err) // Always call closer - if err != nil { - return PartialResult{}, err - } - if txnErr != nil { - return PartialResult{}, txnErr - } - - pr := PartialResult{ - compiler: pq.r.compiler, - store: pq.r.store, - body: pq.r.parsedQuery, - builtinDecls: pq.r.builtinDecls, - builtinFuncs: pq.r.builtinFuncs, - } - - return pr, nil -} - -// Partial runs partial evaluation on r and returns the result. -func (r *Rego) Partial(ctx context.Context) (*PartialQueries, error) { - var err error - var txnClose transactionCloser - r.txn, txnClose, err = r.getTxn(ctx) - if err != nil { - return nil, err - } - - pq, err := r.PrepareForPartial(ctx) - if err != nil { - _ = txnClose(ctx, err) // Ignore error - return nil, err - } - - evalArgs := []EvalOption{ - EvalTransaction(r.txn), - EvalMetrics(r.metrics), - EvalInstrument(r.instrument), - EvalInterQueryBuiltinCache(r.interQueryBuiltinCache), - EvalInterQueryBuiltinValueCache(r.interQueryBuiltinValueCache), - } - - if r.ndBuiltinCache != nil { - evalArgs = append(evalArgs, EvalNDBuiltinCache(r.ndBuiltinCache)) - } - - for _, t := range r.queryTracers { - evalArgs = append(evalArgs, EvalQueryTracer(t)) - } - - for i := range r.resolvers { - evalArgs = append(evalArgs, EvalResolver(r.resolvers[i].ref, r.resolvers[i].r)) - } - - pqs, err := pq.Partial(ctx, evalArgs...) - txnErr := txnClose(ctx, err) // Always call closer - if err == nil { - err = txnErr - } - return pqs, err + }) + + return v1.New(opts...) } // CompileOption defines a function to set options on Compile calls. -type CompileOption func(*CompileContext) +type CompileOption = v1.CompileOption // CompileContext contains options for Compile calls. -type CompileContext struct { - partial bool -} +type CompileContext = v1.CompileContext // CompilePartial defines an option to control whether partial evaluation is run // before the query is planned and compiled. func CompilePartial(yes bool) CompileOption { - return func(cfg *CompileContext) { - cfg.partial = yes - } -} - -// Compile returns a compiled policy query. -func (r *Rego) Compile(ctx context.Context, opts ...CompileOption) (*CompileResult, error) { - - var cfg CompileContext - - for _, opt := range opts { - opt(&cfg) - } - - var queries []ast.Body - modules := make([]*ast.Module, 0, len(r.compiler.Modules)) - - if cfg.partial { - - pq, err := r.Partial(ctx) - if err != nil { - return nil, err - } - if r.dump != nil { - if len(pq.Queries) != 0 { - msg := fmt.Sprintf("QUERIES (%d total):", len(pq.Queries)) - fmt.Fprintln(r.dump, msg) - fmt.Fprintln(r.dump, strings.Repeat("-", len(msg))) - for i := range pq.Queries { - fmt.Println(pq.Queries[i]) - } - fmt.Fprintln(r.dump) - } - if len(pq.Support) != 0 { - msg := fmt.Sprintf("SUPPORT (%d total):", len(pq.Support)) - fmt.Fprintln(r.dump, msg) - fmt.Fprintln(r.dump, strings.Repeat("-", len(msg))) - for i := range pq.Support { - fmt.Println(pq.Support[i]) - } - fmt.Fprintln(r.dump) - } - } - - queries = pq.Queries - modules = pq.Support - - for _, module := range r.compiler.Modules { - modules = append(modules, module) - } - } else { - var err error - // If creating a new transaction it should be closed before calling the - // planner to avoid holding open the transaction longer than needed. 
- // - // TODO(tsandall): in future, planner could make use of store, in which - // case this will need to change. - var txnClose transactionCloser - r.txn, txnClose, err = r.getTxn(ctx) - if err != nil { - return nil, err - } - - err = r.prepare(ctx, compileQueryType, nil) - txnErr := txnClose(ctx, err) // Always call closer - if err != nil { - return nil, err - } - if txnErr != nil { - return nil, err - } - - for _, module := range r.compiler.Modules { - modules = append(modules, module) - } - - queries = []ast.Body{r.compiledQueries[compileQueryType].query} - } - - if tgt := r.targetPlugin(r.target); tgt != nil { - return nil, fmt.Errorf("unsupported for rego target plugins") - } - - return r.compileWasm(modules, queries, compileQueryType) // TODO(sr) control flow is funky here -} - -func (r *Rego) compileWasm(_ []*ast.Module, queries []ast.Body, qType queryType) (*CompileResult, error) { - policy, err := r.planQuery(queries, qType) - if err != nil { - return nil, err - } - - m, err := wasm.New().WithPolicy(policy).Compile() - if err != nil { - return nil, err - } - - var out bytes.Buffer - if err := encoding.WriteModule(&out, m); err != nil { - return nil, err - } - - return &CompileResult{ - Bytes: out.Bytes(), - }, nil + return v1.CompilePartial(yes) } // PrepareOption defines a function to set an option to control // the behavior of the Prepare call. -type PrepareOption func(*PrepareConfig) +type PrepareOption = v1.PrepareOption // PrepareConfig holds settings to control the behavior of the // Prepare call. -type PrepareConfig struct { - doPartialEval bool - disableInlining *[]string - builtinFuncs map[string]*topdown.Builtin -} +type PrepareConfig = v1.PrepareConfig // WithPartialEval configures an option for PrepareForEval // which will have it perform partial evaluation while preparing // the query (similar to rego.Rego#PartialResult) func WithPartialEval() PrepareOption { - return func(p *PrepareConfig) { - p.doPartialEval = true - } + return v1.WithPartialEval() } // WithNoInline adds a set of paths to exclude from partial evaluation inlining. func WithNoInline(paths []string) PrepareOption { - return func(p *PrepareConfig) { - p.disableInlining = &paths - } + return v1.WithNoInline(paths) } // WithBuiltinFuncs carries the rego.Function{1,2,3} per-query function definitions // to the target plugins. func WithBuiltinFuncs(bis map[string]*topdown.Builtin) PrepareOption { - return func(p *PrepareConfig) { - if p.builtinFuncs == nil { - p.builtinFuncs = make(map[string]*topdown.Builtin, len(bis)) - } - for k, v := range bis { - p.builtinFuncs[k] = v - } - } -} - -// BuiltinFuncs allows retrieving the builtin funcs set via PrepareOption -// WithBuiltinFuncs. -func (p *PrepareConfig) BuiltinFuncs() map[string]*topdown.Builtin { - return p.builtinFuncs -} - -// PrepareForEval will parse inputs, modules, and query arguments in preparation -// of evaluating them. -func (r *Rego) PrepareForEval(ctx context.Context, opts ...PrepareOption) (PreparedEvalQuery, error) { - if !r.hasQuery() { - return PreparedEvalQuery{}, fmt.Errorf("cannot evaluate empty query") - } - - pCfg := &PrepareConfig{} - for _, o := range opts { - o(pCfg) - } - - var err error - var txnClose transactionCloser - r.txn, txnClose, err = r.getTxn(ctx) - if err != nil { - return PreparedEvalQuery{}, err - } - - // If the caller wanted to do partial evaluation as part of preparation - // do it now and use the new Rego object. 
- if pCfg.doPartialEval { - - pr, err := r.partialResult(ctx, pCfg) - if err != nil { - _ = txnClose(ctx, err) // Ignore error - return PreparedEvalQuery{}, err - } - - // Prepare the new query using the result of partial evaluation - pq, err := pr.Rego(Transaction(r.txn)).PrepareForEval(ctx) - txnErr := txnClose(ctx, err) - if err != nil { - return pq, err - } - return pq, txnErr - } - - err = r.prepare(ctx, evalQueryType, []extraStage{ - { - after: "ResolveRefs", - stage: ast.QueryCompilerStageDefinition{ - Name: "RewriteToCaptureValue", - MetricName: "query_compile_stage_rewrite_to_capture_value", - Stage: r.rewriteQueryToCaptureValue, - }, - }, - }) - if err != nil { - _ = txnClose(ctx, err) // Ignore error - return PreparedEvalQuery{}, err - } - - switch r.target { - case targetWasm: // TODO(sr): make wasm a target plugin, too - - if r.hasWasmModule() { - _ = txnClose(ctx, err) // Ignore error - return PreparedEvalQuery{}, fmt.Errorf("wasm target not supported") - } - - var modules []*ast.Module - for _, module := range r.compiler.Modules { - modules = append(modules, module) - } - - queries := []ast.Body{r.compiledQueries[evalQueryType].query} - - e, err := opa.LookupEngine(targetWasm) - if err != nil { - return PreparedEvalQuery{}, err - } - - // nolint: staticcheck // SA4006 false positive - cr, err := r.compileWasm(modules, queries, evalQueryType) - if err != nil { - _ = txnClose(ctx, err) // Ignore error - return PreparedEvalQuery{}, err - } - - // nolint: staticcheck // SA4006 false positive - data, err := r.store.Read(ctx, r.txn, storage.Path{}) - if err != nil { - _ = txnClose(ctx, err) // Ignore error - return PreparedEvalQuery{}, err - } - - o, err := e.New().WithPolicyBytes(cr.Bytes).WithDataJSON(data).Init() - if err != nil { - _ = txnClose(ctx, err) // Ignore error - return PreparedEvalQuery{}, err - } - r.opa = o - - case targetRego: // do nothing, don't lookup default plugin - default: // either a specific plugin target, or one that is default - if tgt := r.targetPlugin(r.target); tgt != nil { - queries := []ast.Body{r.compiledQueries[evalQueryType].query} - pol, err := r.planQuery(queries, evalQueryType) - if err != nil { - return PreparedEvalQuery{}, err - } - // always add the builtins provided via rego.FunctionN options - opts = append(opts, WithBuiltinFuncs(r.builtinFuncs)) - r.targetPrepState, err = tgt.PrepareForEval(ctx, pol, opts...) - if err != nil { - return PreparedEvalQuery{}, err - } - } - } - - txnErr := txnClose(ctx, err) // Always call closer - if err != nil { - return PreparedEvalQuery{}, err - } - if txnErr != nil { - return PreparedEvalQuery{}, txnErr - } - - return PreparedEvalQuery{preparedQuery{r, pCfg}}, err -} - -// PrepareForPartial will parse inputs, modules, and query arguments in preparation -// of partially evaluating them. 
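// For example (query and unknowns are illustrative assumptions):
//
//	r := rego.New(
//		rego.Query("data.example.allow == true"),
//		rego.Unknowns([]string{"input"}),
//	)
//	pq, err := r.PrepareForPartial(ctx)
//	if err == nil {
//		partial, _ := pq.Partial(ctx)
//		_ = partial // partial.Queries and partial.Support hold the residual policy
//	}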
-func (r *Rego) PrepareForPartial(ctx context.Context, opts ...PrepareOption) (PreparedPartialQuery, error) { - if !r.hasQuery() { - return PreparedPartialQuery{}, fmt.Errorf("cannot evaluate empty query") - } - - pCfg := &PrepareConfig{} - for _, o := range opts { - o(pCfg) - } - - var err error - var txnClose transactionCloser - r.txn, txnClose, err = r.getTxn(ctx) - if err != nil { - return PreparedPartialQuery{}, err - } - - err = r.prepare(ctx, partialQueryType, []extraStage{ - { - after: "CheckSafety", - stage: ast.QueryCompilerStageDefinition{ - Name: "RewriteEquals", - MetricName: "query_compile_stage_rewrite_equals", - Stage: r.rewriteEqualsForPartialQueryCompile, - }, - }, - }) - txnErr := txnClose(ctx, err) // Always call closer - if err != nil { - return PreparedPartialQuery{}, err - } - if txnErr != nil { - return PreparedPartialQuery{}, txnErr - } - - return PreparedPartialQuery{preparedQuery{r, pCfg}}, err -} - -func (r *Rego) prepare(ctx context.Context, qType queryType, extras []extraStage) error { - var err error - - r.parsedInput, err = r.parseInput() - if err != nil { - return err - } - - err = r.loadFiles(ctx, r.txn, r.metrics) - if err != nil { - return err - } - - err = r.loadBundles(ctx, r.txn, r.metrics) - if err != nil { - return err - } - - err = r.parseModules(ctx, r.txn, r.metrics) - if err != nil { - return err - } - - // Compile the modules *before* the query, else functions - // defined in the module won't be found... - err = r.compileModules(ctx, r.txn, r.metrics) - if err != nil { - return err - } - - imports, err := r.prepareImports() - if err != nil { - return err - } - - queryImports := []*ast.Import{} - for _, imp := range imports { - path := imp.Path.Value.(ast.Ref) - if path.HasPrefix([]*ast.Term{ast.FutureRootDocument}) || path.HasPrefix([]*ast.Term{ast.RegoRootDocument}) { - queryImports = append(queryImports, imp) - } - } - - r.parsedQuery, err = r.parseQuery(queryImports, r.metrics) - if err != nil { - return err - } - - err = r.compileAndCacheQuery(qType, r.parsedQuery, imports, r.metrics, extras) - if err != nil { - return err - } - - return nil -} - -func (r *Rego) parseModules(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error { - if len(r.modules) == 0 { - return nil - } - - ids, err := r.store.ListPolicies(ctx, txn) - if err != nil { - return err - } - - m.Timer(metrics.RegoModuleParse).Start() - defer m.Timer(metrics.RegoModuleParse).Stop() - var errs Errors - - // Parse any modules that are saved to the store, but only if - // another compile step is going to occur (ie. we have parsed modules - // that need to be compiled). 
- for _, id := range ids { - // if it is already on the compiler we're using - // then don't bother to re-parse it from source - if _, haveMod := r.compiler.Modules[id]; haveMod { - continue - } - - bs, err := r.store.GetPolicy(ctx, txn, id) - if err != nil { - return err - } - - parsed, err := ast.ParseModuleWithOpts(id, string(bs), ast.ParserOptions{RegoVersion: r.regoVersion}) - if err != nil { - errs = append(errs, err) - } - - r.parsedModules[id] = parsed - } - - // Parse any passed in as arguments to the Rego object - for _, module := range r.modules { - p, err := module.ParseWithOpts(ast.ParserOptions{RegoVersion: r.regoVersion}) - if err != nil { - switch errorWithType := err.(type) { - case ast.Errors: - for _, e := range errorWithType { - errs = append(errs, e) - } - default: - errs = append(errs, errorWithType) - } - } - r.parsedModules[module.filename] = p - } - - if len(errs) > 0 { - return errs - } - - return nil -} - -func (r *Rego) loadFiles(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error { - if len(r.loadPaths.paths) == 0 { - return nil - } - - m.Timer(metrics.RegoLoadFiles).Start() - defer m.Timer(metrics.RegoLoadFiles).Stop() - - result, err := loader.NewFileLoader(). - WithMetrics(m). - WithProcessAnnotation(true). - WithRegoVersion(r.regoVersion). - Filtered(r.loadPaths.paths, r.loadPaths.filter) - if err != nil { - return err - } - for name, mod := range result.Modules { - r.parsedModules[name] = mod.Parsed - } - - if len(result.Documents) > 0 { - err = r.store.Write(ctx, txn, storage.AddOp, storage.Path{}, result.Documents) - if err != nil { - return err - } - } - return nil -} - -func (r *Rego) loadBundles(_ context.Context, _ storage.Transaction, m metrics.Metrics) error { - if len(r.bundlePaths) == 0 { - return nil - } - - m.Timer(metrics.RegoLoadBundles).Start() - defer m.Timer(metrics.RegoLoadBundles).Stop() - - for _, path := range r.bundlePaths { - bndl, err := loader.NewFileLoader(). - WithMetrics(m). - WithProcessAnnotation(true). - WithSkipBundleVerification(r.skipBundleVerification). - WithRegoVersion(r.regoVersion). - AsBundle(path) - if err != nil { - return fmt.Errorf("loading error: %s", err) - } - r.bundles[path] = bndl - } - return nil -} - -func (r *Rego) parseInput() (ast.Value, error) { - if r.parsedInput != nil { - return r.parsedInput, nil - } - return r.parseRawInput(r.rawInput, r.metrics) -} - -func (r *Rego) parseRawInput(rawInput *interface{}, m metrics.Metrics) (ast.Value, error) { - var input ast.Value - - if rawInput == nil { - return input, nil - } - - m.Timer(metrics.RegoInputParse).Start() - defer m.Timer(metrics.RegoInputParse).Stop() - - rawPtr := util.Reference(rawInput) - - // roundtrip through json: this turns slices (e.g. 
[]string, []bool) into - // []interface{}, the only array type ast.InterfaceToValue can work with - if err := util.RoundTrip(rawPtr); err != nil { - return nil, err - } - - return ast.InterfaceToValue(*rawPtr) -} - -func (r *Rego) parseQuery(queryImports []*ast.Import, m metrics.Metrics) (ast.Body, error) { - if r.parsedQuery != nil { - return r.parsedQuery, nil - } - - m.Timer(metrics.RegoQueryParse).Start() - defer m.Timer(metrics.RegoQueryParse).Stop() - - popts, err := future.ParserOptionsFromFutureImports(queryImports) - if err != nil { - return nil, err - } - popts.RegoVersion = r.regoVersion - popts, err = parserOptionsFromRegoVersionImport(queryImports, popts) - if err != nil { - return nil, err - } - popts.SkipRules = true - return ast.ParseBodyWithOpts(r.query, popts) -} - -func parserOptionsFromRegoVersionImport(imports []*ast.Import, popts ast.ParserOptions) (ast.ParserOptions, error) { - for _, imp := range imports { - path := imp.Path.Value.(ast.Ref) - if ast.Compare(path, ast.RegoV1CompatibleRef) == 0 { - popts.RegoVersion = ast.RegoV1 - return popts, nil - } - } - return popts, nil -} - -func (r *Rego) compileModules(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error { - - // Only compile again if there are new modules. - if len(r.bundles) > 0 || len(r.parsedModules) > 0 { - - // The bundle.Activate call will activate any bundles passed in - // (ie compile + handle data store changes), and include any of - // the additional modules passed in. If no bundles are provided - // it will only compile the passed in modules. - // Use this as the single-point of compiling everything only a - // single time. - opts := &bundle.ActivateOpts{ - Ctx: ctx, - Store: r.store, - Txn: txn, - Compiler: r.compilerForTxn(ctx, r.store, txn), - Metrics: m, - Bundles: r.bundles, - ExtraModules: r.parsedModules, - ParserOptions: ast.ParserOptions{RegoVersion: r.regoVersion}, - } - err := bundle.Activate(opts) - if err != nil { - return err - } - } - - // Ensure all configured resolvers from the store are loaded. Skip if any were explicitly provided. - if len(r.resolvers) == 0 { - resolvers, err := bundleUtils.LoadWasmResolversFromStore(ctx, r.store, txn, r.bundles) - if err != nil { - return err - } - - for _, rslvr := range resolvers { - for _, ep := range rslvr.Entrypoints() { - r.resolvers = append(r.resolvers, refResolver{ep, rslvr}) - } - } - } - return nil -} - -func (r *Rego) compileAndCacheQuery(qType queryType, query ast.Body, imports []*ast.Import, m metrics.Metrics, extras []extraStage) error { - m.Timer(metrics.RegoQueryCompile).Start() - defer m.Timer(metrics.RegoQueryCompile).Stop() - - cachedQuery, ok := r.compiledQueries[qType] - if ok && cachedQuery.query != nil && cachedQuery.compiler != nil { - return nil - } - - qc, compiled, err := r.compileQuery(query, imports, m, extras) - if err != nil { - return err - } - - // cache the query for future use - r.compiledQueries[qType] = compiledQuery{ - query: compiled, - compiler: qc, - } - return nil -} - -func (r *Rego) prepareImports() ([]*ast.Import, error) { - imports := r.parsedImports - - if len(r.imports) > 0 { - s := make([]string, len(r.imports)) - for i := range r.imports { - s[i] = fmt.Sprintf("import %v", r.imports[i]) - } - parsed, err := ast.ParseImports(strings.Join(s, "\n")) - if err != nil { - return nil, err - } - imports = append(imports, parsed...) 
- } - return imports, nil -} - -func (r *Rego) compileQuery(query ast.Body, imports []*ast.Import, _ metrics.Metrics, extras []extraStage) (ast.QueryCompiler, ast.Body, error) { - var pkg *ast.Package - - if r.pkg != "" { - var err error - pkg, err = ast.ParsePackage(fmt.Sprintf("package %v", r.pkg)) - if err != nil { - return nil, nil, err - } - } else { - pkg = r.parsedPackage - } - - qctx := ast.NewQueryContext(). - WithPackage(pkg). - WithImports(imports) - - qc := r.compiler.QueryCompiler(). - WithContext(qctx). - WithUnsafeBuiltins(r.unsafeBuiltins). - WithEnablePrintStatements(r.enablePrintStatements). - WithStrict(false) - - for _, extra := range extras { - qc = qc.WithStageAfter(extra.after, extra.stage) - } - - compiled, err := qc.Compile(query) - - return qc, compiled, err - -} - -func (r *Rego) eval(ctx context.Context, ectx *EvalContext) (ResultSet, error) { - switch { - case r.targetPrepState != nil: // target plugin flow - var val ast.Value - if r.runtime != nil { - val = r.runtime.Value - } - s, err := r.targetPrepState.Eval(ctx, ectx, val) - if err != nil { - return nil, err - } - return r.valueToQueryResult(s, ectx) - case r.target == targetWasm: - return r.evalWasm(ctx, ectx) - case r.target == targetRego: // continue - } - - q := topdown.NewQuery(ectx.compiledQuery.query). - WithQueryCompiler(ectx.compiledQuery.compiler). - WithCompiler(r.compiler). - WithStore(r.store). - WithTransaction(ectx.txn). - WithBuiltins(r.builtinFuncs). - WithMetrics(ectx.metrics). - WithInstrumentation(ectx.instrumentation). - WithRuntime(r.runtime). - WithIndexing(ectx.indexing). - WithEarlyExit(ectx.earlyExit). - WithInterQueryBuiltinCache(ectx.interQueryBuiltinCache). - WithInterQueryBuiltinValueCache(ectx.interQueryBuiltinValueCache). - WithStrictBuiltinErrors(r.strictBuiltinErrors). - WithBuiltinErrorList(r.builtinErrorList). - WithSeed(ectx.seed). - WithPrintHook(ectx.printHook). - WithDistributedTracingOpts(r.distributedTacingOpts). - WithVirtualCache(ectx.virtualCache) - - if !ectx.time.IsZero() { - q = q.WithTime(ectx.time) - } - - if ectx.ndBuiltinCache != nil { - q = q.WithNDBuiltinCache(ectx.ndBuiltinCache) - } - - for i := range ectx.queryTracers { - q = q.WithQueryTracer(ectx.queryTracers[i]) - } - - if ectx.parsedInput != nil { - q = q.WithInput(ast.NewTerm(ectx.parsedInput)) - } - - for i := range ectx.resolvers { - q = q.WithResolver(ectx.resolvers[i].ref, ectx.resolvers[i].r) - } - - // Cancel query if context is cancelled or deadline is reached. 
- c := topdown.NewCancel() - q = q.WithCancel(c) - exit := make(chan struct{}) - defer close(exit) - go waitForDone(ctx, exit, func() { - c.Cancel() - }) - - var rs ResultSet - err := q.Iter(ctx, func(qr topdown.QueryResult) error { - result, err := r.generateResult(qr, ectx) - if err != nil { - return err - } - rs = append(rs, result) - return nil - }) - - if err != nil { - return nil, err - } - - if len(rs) == 0 { - return nil, nil - } - - return rs, nil -} - -func (r *Rego) evalWasm(ctx context.Context, ectx *EvalContext) (ResultSet, error) { - input := ectx.rawInput - if ectx.parsedInput != nil { - i := interface{}(ectx.parsedInput) - input = &i - } - result, err := r.opa.Eval(ctx, opa.EvalOpts{ - Metrics: r.metrics, - Input: input, - Time: ectx.time, - Seed: ectx.seed, - InterQueryBuiltinCache: ectx.interQueryBuiltinCache, - NDBuiltinCache: ectx.ndBuiltinCache, - PrintHook: ectx.printHook, - Capabilities: ectx.capabilities, - }) - if err != nil { - return nil, err - } - - parsed, err := ast.ParseTerm(string(result.Result)) - if err != nil { - return nil, err - } - - return r.valueToQueryResult(parsed.Value, ectx) -} - -func (r *Rego) valueToQueryResult(res ast.Value, ectx *EvalContext) (ResultSet, error) { - resultSet, ok := res.(ast.Set) - if !ok { - return nil, fmt.Errorf("illegal result type") - } - - if resultSet.Len() == 0 { - return nil, nil - } - - var rs ResultSet - err := resultSet.Iter(func(term *ast.Term) error { - obj, ok := term.Value.(ast.Object) - if !ok { - return fmt.Errorf("illegal result type") - } - qr := topdown.QueryResult{} - obj.Foreach(func(k, v *ast.Term) { - kvt := ast.VarTerm(string(k.Value.(ast.String))) - qr[kvt.Value.(ast.Var)] = v - }) - result, err := r.generateResult(qr, ectx) - if err != nil { - return err - } - rs = append(rs, result) - return nil - }) - - return rs, err -} - -func (r *Rego) generateResult(qr topdown.QueryResult, ectx *EvalContext) (Result, error) { - - rewritten := ectx.compiledQuery.compiler.RewrittenVars() - - result := newResult() - for k, term := range qr { - v, err := r.generateJSON(term, ectx) - if err != nil { - return result, err - } - - if rw, ok := rewritten[k]; ok { - k = rw - } - if isTermVar(k) || isTermWasmVar(k) || k.IsGenerated() || k.IsWildcard() { - continue - } - result.Bindings[string(k)] = v - } - - for _, expr := range ectx.compiledQuery.query { - if expr.Generated { - continue - } - - if k, ok := r.capture[expr]; ok { - v, err := r.generateJSON(qr[k], ectx) - if err != nil { - return result, err - } - result.Expressions = append(result.Expressions, newExpressionValue(expr, v)) - } else { - result.Expressions = append(result.Expressions, newExpressionValue(expr, true)) - } - - } - return result, nil -} - -func (r *Rego) partialResult(ctx context.Context, pCfg *PrepareConfig) (PartialResult, error) { - - err := r.prepare(ctx, partialResultQueryType, []extraStage{ - { - after: "ResolveRefs", - stage: ast.QueryCompilerStageDefinition{ - Name: "RewriteForPartialEval", - MetricName: "query_compile_stage_rewrite_for_partial_eval", - Stage: r.rewriteQueryForPartialEval, - }, - }, - }) - if err != nil { - return PartialResult{}, err - } - - ectx := &EvalContext{ - parsedInput: r.parsedInput, - metrics: r.metrics, - txn: r.txn, - partialNamespace: r.partialNamespace, - queryTracers: r.queryTracers, - compiledQuery: r.compiledQueries[partialResultQueryType], - instrumentation: r.instrumentation, - indexing: true, - resolvers: r.resolvers, - capabilities: r.capabilities, - strictBuiltinErrors: r.strictBuiltinErrors, - } 
- - disableInlining := r.disableInlining - - if pCfg.disableInlining != nil { - disableInlining = *pCfg.disableInlining - } - - ectx.disableInlining, err = parseStringsToRefs(disableInlining) - if err != nil { - return PartialResult{}, err - } - - pq, err := r.partial(ctx, ectx) - if err != nil { - return PartialResult{}, err - } - - // Construct module for queries. - id := fmt.Sprintf("__partialresult__%s__", ectx.partialNamespace) - - module, err := ast.ParseModule(id, "package "+ectx.partialNamespace) - if err != nil { - return PartialResult{}, fmt.Errorf("bad partial namespace") - } - - module.Rules = make([]*ast.Rule, len(pq.Queries)) - for i, body := range pq.Queries { - rule := &ast.Rule{ - Head: ast.NewHead(ast.Var("__result__"), nil, ast.Wildcard), - Body: body, - Module: module, - } - module.Rules[i] = rule - if checkPartialResultForRecursiveRefs(body, rule.Path()) { - return PartialResult{}, Errors{errPartialEvaluationNotEffective} - } - } - - // Update compiler with partial evaluation output. - r.compiler.Modules[id] = module - for i, module := range pq.Support { - r.compiler.Modules[fmt.Sprintf("__partialsupport__%s__%d__", ectx.partialNamespace, i)] = module - } - - r.metrics.Timer(metrics.RegoModuleCompile).Start() - r.compilerForTxn(ctx, r.store, r.txn).Compile(r.compiler.Modules) - r.metrics.Timer(metrics.RegoModuleCompile).Stop() - - if r.compiler.Failed() { - return PartialResult{}, r.compiler.Errors - } - - result := PartialResult{ - compiler: r.compiler, - store: r.store, - body: ast.MustParseBody(fmt.Sprintf("data.%v.__result__", ectx.partialNamespace)), - builtinDecls: r.builtinDecls, - builtinFuncs: r.builtinFuncs, - } - - return result, nil -} - -func (r *Rego) partial(ctx context.Context, ectx *EvalContext) (*PartialQueries, error) { - - var unknowns []*ast.Term - - switch { - case ectx.parsedUnknowns != nil: - unknowns = ectx.parsedUnknowns - case ectx.unknowns != nil: - unknowns = make([]*ast.Term, len(ectx.unknowns)) - for i := range ectx.unknowns { - var err error - unknowns[i], err = ast.ParseTerm(ectx.unknowns[i]) - if err != nil { - return nil, err - } - } - default: - // Use input document as unknown if caller has not specified any. - unknowns = []*ast.Term{ast.NewTerm(ast.InputRootRef)} - } - - q := topdown.NewQuery(ectx.compiledQuery.query). - WithQueryCompiler(ectx.compiledQuery.compiler). - WithCompiler(r.compiler). - WithStore(r.store). - WithTransaction(ectx.txn). - WithBuiltins(r.builtinFuncs). - WithMetrics(ectx.metrics). - WithInstrumentation(ectx.instrumentation). - WithUnknowns(unknowns). - WithDisableInlining(ectx.disableInlining). - WithRuntime(r.runtime). - WithIndexing(ectx.indexing). - WithEarlyExit(ectx.earlyExit). - WithPartialNamespace(ectx.partialNamespace). - WithSkipPartialNamespace(r.skipPartialNamespace). - WithShallowInlining(r.shallowInlining). - WithInterQueryBuiltinCache(ectx.interQueryBuiltinCache). - WithInterQueryBuiltinValueCache(ectx.interQueryBuiltinValueCache). - WithStrictBuiltinErrors(ectx.strictBuiltinErrors). - WithSeed(ectx.seed). 
- WithPrintHook(ectx.printHook) - - if !ectx.time.IsZero() { - q = q.WithTime(ectx.time) - } - - if ectx.ndBuiltinCache != nil { - q = q.WithNDBuiltinCache(ectx.ndBuiltinCache) - } - - for i := range ectx.queryTracers { - q = q.WithQueryTracer(ectx.queryTracers[i]) - } - - if ectx.parsedInput != nil { - q = q.WithInput(ast.NewTerm(ectx.parsedInput)) - } - - for i := range ectx.resolvers { - q = q.WithResolver(ectx.resolvers[i].ref, ectx.resolvers[i].r) - } - - // Cancel query if context is cancelled or deadline is reached. - c := topdown.NewCancel() - q = q.WithCancel(c) - exit := make(chan struct{}) - defer close(exit) - go waitForDone(ctx, exit, func() { - c.Cancel() - }) - - queries, support, err := q.PartialRun(ctx) - if err != nil { - return nil, err - } - - // If the target rego-version is v0, and the rego.v1 import is available, then we attempt to apply it to support modules. - if r.regoVersion == ast.RegoV0 && (r.capabilities == nil || r.capabilities.ContainsFeature(ast.FeatureRegoV1Import)) { - - for i, mod := range support { - // We can't apply the RegoV0CompatV1 version to the support module if it contains rules or vars that - // conflict with future keywords. - applyRegoVersion := true - - ast.WalkRules(mod, func(r *ast.Rule) bool { - name := r.Head.Name - if name == "" && len(r.Head.Reference) > 0 { - name = r.Head.Reference[0].Value.(ast.Var) - } - if ast.IsFutureKeyword(name.String()) { - applyRegoVersion = false - return true - } - return false - }) - - if applyRegoVersion { - ast.WalkVars(mod, func(v ast.Var) bool { - if ast.IsFutureKeyword(v.String()) { - applyRegoVersion = false - return true - } - return false - }) - } - - if applyRegoVersion { - support[i].SetRegoVersion(ast.RegoV0CompatV1) - } else { - support[i].SetRegoVersion(r.regoVersion) - } - } - } else { - // If the target rego-version is not v0, then we apply the target rego-version to the support modules. - for i := range support { - support[i].SetRegoVersion(r.regoVersion) - } - } - - pq := &PartialQueries{ - Queries: queries, - Support: support, - } - - return pq, nil -} - -func (r *Rego) rewriteQueryToCaptureValue(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) { - - checkCapture := iteration(query) || len(query) > 1 - - for _, expr := range query { - - if expr.Negated { - continue - } - - if expr.IsAssignment() || expr.IsEquality() { - continue - } - - var capture *ast.Term - - // If the expression can be evaluated as a function, rewrite it to - // capture the return value. E.g., neq(1,2) becomes neq(1,2,x) but - // plus(1,2,x) does not get rewritten. 
- switch terms := expr.Terms.(type) { - case *ast.Term: - capture = r.generateTermVar() - expr.Terms = ast.Equality.Expr(terms, capture).Terms - r.capture[expr] = capture.Value.(ast.Var) - case []*ast.Term: - tpe := r.compiler.TypeEnv.Get(terms[0]) - if !types.Void(tpe) && types.Arity(tpe) == len(terms)-1 { - capture = r.generateTermVar() - expr.Terms = append(terms, capture) - r.capture[expr] = capture.Value.(ast.Var) - } - } - - if capture != nil && checkCapture { - cpy := expr.Copy() - cpy.Terms = capture - cpy.Generated = true - cpy.With = nil - query.Append(cpy) - } - } - - return query, nil -} - -func (r *Rego) rewriteQueryForPartialEval(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) { - if len(query) != 1 { - return nil, fmt.Errorf("partial evaluation requires single ref (not multiple expressions)") - } - - term, ok := query[0].Terms.(*ast.Term) - if !ok { - return nil, fmt.Errorf("partial evaluation requires ref (not expression)") - } - - ref, ok := term.Value.(ast.Ref) - if !ok { - return nil, fmt.Errorf("partial evaluation requires ref (not %v)", ast.TypeName(term.Value)) - } - - if !ref.IsGround() { - return nil, fmt.Errorf("partial evaluation requires ground ref") - } - - return ast.NewBody(ast.Equality.Expr(ast.Wildcard, term)), nil -} - -// rewriteEqualsForPartialQueryCompile will rewrite == to = in queries. Normally -// this wouldn't be done, except for handling queries with the `Partial` API -// where rewriting them can substantially simplify the result, and it is unlikely -// that the caller would need expression values. -func (r *Rego) rewriteEqualsForPartialQueryCompile(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) { - doubleEq := ast.Equal.Ref() - unifyOp := ast.Equality.Ref() - ast.WalkExprs(query, func(x *ast.Expr) bool { - if x.IsCall() { - operator := x.Operator() - if operator.Equal(doubleEq) && len(x.Operands()) == 2 { - x.SetOperator(ast.NewTerm(unifyOp)) - } - } - return false - }) - return query, nil -} - -func (r *Rego) generateTermVar() *ast.Term { - r.termVarID++ - prefix := ast.WildcardPrefix - if p := r.targetPlugin(r.target); p != nil { - prefix = wasmVarPrefix - } else if r.target == targetWasm { - prefix = wasmVarPrefix - } - return ast.VarTerm(fmt.Sprintf("%sterm%v", prefix, r.termVarID)) -} - -func (r Rego) hasQuery() bool { - return len(r.query) != 0 || len(r.parsedQuery) != 0 -} - -func (r Rego) hasWasmModule() bool { - for _, b := range r.bundles { - if len(b.WasmModules) > 0 { - return true - } - } - return false -} - -type transactionCloser func(ctx context.Context, err error) error - -// getTxn will conditionally create a read or write transaction suitable for -// the configured Rego object. The returned function should be used to close the txn -// regardless of status. -func (r *Rego) getTxn(ctx context.Context) (storage.Transaction, transactionCloser, error) { - - noopCloser := func(_ context.Context, _ error) error { - return nil // no-op default - } - - if r.txn != nil { - // Externally provided txn - return r.txn, noopCloser, nil - } - - // Create a new transaction.. - params := storage.TransactionParams{} - - // Bundles and data paths may require writing data files or manifests to storage - if len(r.bundles) > 0 || len(r.bundlePaths) > 0 || len(r.loadPaths.paths) > 0 { - - // If we were given a store we will *not* write to it, only do that on one - // which was created automatically on behalf of the user. 
- if !r.ownStore { - return nil, noopCloser, errors.New("unable to start write transaction when store was provided") - } - - params.Write = true - } - - txn, err := r.store.NewTransaction(ctx, params) - if err != nil { - return nil, noopCloser, err - } - - // Setup a closer function that will abort or commit as needed. - closer := func(ctx context.Context, txnErr error) error { - var err error - - if txnErr == nil && params.Write { - err = r.store.Commit(ctx, txn) - } else { - r.store.Abort(ctx, txn) - } - - // Clear the auto created transaction now that it is closed. - r.txn = nil - - return err - } - - return txn, closer, nil -} - -func (r *Rego) compilerForTxn(ctx context.Context, store storage.Store, txn storage.Transaction) *ast.Compiler { - // Update the compiler to have a valid path conflict check - // for the current context and transaction. - return r.compiler.WithPathConflictsCheck(storage.NonEmpty(ctx, store, txn)) -} - -func checkPartialResultForRecursiveRefs(body ast.Body, path ast.Ref) bool { - var stop bool - ast.WalkRefs(body, func(x ast.Ref) bool { - if !stop { - if path.HasPrefix(x) { - stop = true - } - } - return stop - }) - return stop -} - -func isTermVar(v ast.Var) bool { - return strings.HasPrefix(string(v), ast.WildcardPrefix+"term") -} - -func isTermWasmVar(v ast.Var) bool { - return strings.HasPrefix(string(v), wasmVarPrefix+"term") -} - -func waitForDone(ctx context.Context, exit chan struct{}, f func()) { - select { - case <-exit: - return - case <-ctx.Done(): - f() - return - } -} - -type rawModule struct { - filename string - module string -} - -func (m rawModule) Parse() (*ast.Module, error) { - return ast.ParseModule(m.filename, m.module) -} - -func (m rawModule) ParseWithOpts(opts ast.ParserOptions) (*ast.Module, error) { - return ast.ParseModuleWithOpts(m.filename, m.module, opts) -} - -type extraStage struct { - after string - stage ast.QueryCompilerStageDefinition -} - -type refResolver struct { - ref ast.Ref - r resolver.Resolver -} - -func iteration(x interface{}) bool { - - var stopped bool - - vis := ast.NewGenericVisitor(func(x interface{}) bool { - switch x := x.(type) { - case *ast.Term: - if ast.IsComprehension(x.Value) { - return true - } - case ast.Ref: - if !stopped { - if bi := ast.BuiltinMap[x.String()]; bi != nil { - if bi.Relation { - stopped = true - return stopped - } - } - for i := 1; i < len(x); i++ { - if _, ok := x[i].Value.(ast.Var); ok { - stopped = true - return stopped - } - } - } - return stopped - } - return stopped - }) - - vis.Walk(x) - - return stopped -} - -func parseStringsToRefs(s []string) ([]ast.Ref, error) { - - refs := make([]ast.Ref, len(s)) - for i := range refs { - var err error - refs[i], err = ast.ParseRef(s[i]) - if err != nil { - return nil, err - } - } - - return refs, nil -} - -// helper function to finish a built-in function call. If an error occurred, -// wrap the error and return it. Otherwise, invoke the iterator if the result -// was defined. 
-func finishFunction(name string, bctx topdown.BuiltinContext, result *ast.Term, err error, iter func(*ast.Term) error) error { - if err != nil { - var e *HaltError - if errors.As(err, &e) { - tdErr := &topdown.Error{ - Code: topdown.BuiltinErr, - Message: fmt.Sprintf("%v: %v", name, e.Error()), - Location: bctx.Location, - } - return topdown.Halt{Err: tdErr.Wrap(e)} - } - tdErr := &topdown.Error{ - Code: topdown.BuiltinErr, - Message: fmt.Sprintf("%v: %v", name, err.Error()), - Location: bctx.Location, - } - return tdErr.Wrap(err) - } - if result == nil { - return nil - } - return iter(result) -} - -// helper function to return an option that sets a custom built-in function. -func newFunction(decl *Function, f topdown.BuiltinFunc) func(*Rego) { - return func(r *Rego) { - r.builtinDecls[decl.Name] = &ast.Builtin{ - Name: decl.Name, - Decl: decl.Decl, - Nondeterministic: decl.Nondeterministic, - } - r.builtinFuncs[decl.Name] = &topdown.Builtin{ - Decl: r.builtinDecls[decl.Name], - Func: f, - } - } -} - -func generateJSON(term *ast.Term, ectx *EvalContext) (interface{}, error) { - return ast.JSONWithOpt(term.Value, - ast.JSONOpt{ - SortSets: ectx.sortSets, - CopyMaps: ectx.copyMaps, - }) -} - -func (r *Rego) planQuery(queries []ast.Body, evalQueryType queryType) (*ir.Policy, error) { - modules := make([]*ast.Module, 0, len(r.compiler.Modules)) - for _, module := range r.compiler.Modules { - modules = append(modules, module) - } - - decls := make(map[string]*ast.Builtin, len(r.builtinDecls)+len(ast.BuiltinMap)) - - for k, v := range ast.BuiltinMap { - decls[k] = v - } - - for k, v := range r.builtinDecls { - decls[k] = v - } - - const queryName = "eval" // NOTE(tsandall): the query name is arbitrary - - p := planner.New(). - WithQueries([]planner.QuerySet{ - { - Name: queryName, - Queries: queries, - RewrittenVars: r.compiledQueries[evalQueryType].compiler.RewrittenVars(), - }, - }). - WithModules(modules). - WithBuiltinDecls(decls). - WithDebug(r.dump) - - policy, err := p.Plan() - if err != nil { - return nil, err - } - if r.dump != nil { - fmt.Fprintln(r.dump, "PLAN:") - fmt.Fprintln(r.dump, "-----") - err = ir.Pretty(r.dump, policy) - if err != nil { - return nil, err - } - fmt.Fprintln(r.dump) - } - return policy, nil + return v1.WithBuiltinFuncs(bis) } diff --git a/vendor/github.com/open-policy-agent/opa/rego/resultset.go b/vendor/github.com/open-policy-agent/opa/rego/resultset.go index e60fa6fbe4b..5c03360dfaa 100644 --- a/vendor/github.com/open-policy-agent/opa/rego/resultset.go +++ b/vendor/github.com/open-policy-agent/opa/rego/resultset.go @@ -1,90 +1,22 @@ package rego import ( - "fmt" - - "github.com/open-policy-agent/opa/ast" + v1 "github.com/open-policy-agent/opa/v1/rego" ) // ResultSet represents a collection of output from Rego evaluation. An empty // result set represents an undefined query. -type ResultSet []Result +type ResultSet = v1.ResultSet // Vars represents a collection of variable bindings. The keys are the variable // names and the values are the binding values. -type Vars map[string]interface{} - -// WithoutWildcards returns a copy of v with wildcard variables removed. -func (v Vars) WithoutWildcards() Vars { - n := Vars{} - for k, v := range v { - if ast.Var(k).IsWildcard() || ast.Var(k).IsGenerated() { - continue - } - n[k] = v - } - return n -} +type Vars = v1.Vars // Result defines the output of Rego evaluation. 
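// A caller might unpack a ResultSet like this (names are illustrative):
//
//	rs, err := r.Eval(ctx)
//	if err == nil && len(rs) > 0 {
//		x := rs[0].Bindings["x"]        // value bound to variable x, if any
//		v := rs[0].Expressions[0].Value // value of the first query expression
//		_, _ = x, v
//	}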
-type Result struct { - Expressions []*ExpressionValue `json:"expressions"` - Bindings Vars `json:"bindings,omitempty"` -} - -func newResult() Result { - return Result{ - Bindings: Vars{}, - } -} +type Result = v1.Result // Location defines a position in a Rego query or module. -type Location struct { - Row int `json:"row"` - Col int `json:"col"` -} +type Location = v1.Location // ExpressionValue defines the value of an expression in a Rego query. -type ExpressionValue struct { - Value interface{} `json:"value"` - Text string `json:"text"` - Location *Location `json:"location"` -} - -func newExpressionValue(expr *ast.Expr, value interface{}) *ExpressionValue { - result := &ExpressionValue{ - Value: value, - } - if expr.Location != nil { - result.Text = string(expr.Location.Text) - result.Location = &Location{ - Row: expr.Location.Row, - Col: expr.Location.Col, - } - } - return result -} - -func (ev *ExpressionValue) String() string { - return fmt.Sprint(ev.Value) -} - -// Allowed is a helper method that'll return true if all of these conditions hold: -// - the result set only has one element -// - there is only one expression in the result set's only element -// - that expression has the value `true` -// - there are no bindings. -// -// If bindings are present, this will yield `false`: it would be a pitfall to -// return `true` for a query like `data.authz.allow = x`, which always has a result -// set element with value true, but could also have a binding `x: false`. -func (rs ResultSet) Allowed() bool { - if len(rs) == 1 && len(rs[0].Bindings) == 0 { - if exprs := rs[0].Expressions; len(exprs) == 1 { - if b, ok := exprs[0].Value.(bool); ok { - return b - } - } - } - return false -} +type ExpressionValue = v1.ExpressionValue diff --git a/vendor/github.com/open-policy-agent/opa/storage/doc.go b/vendor/github.com/open-policy-agent/opa/storage/doc.go index 6fa2f86d98d..c33db689edb 100644 --- a/vendor/github.com/open-policy-agent/opa/storage/doc.go +++ b/vendor/github.com/open-policy-agent/opa/storage/doc.go @@ -3,4 +3,8 @@ // license that can be found in the LICENSE file. // Package storage exposes the policy engine's storage layer. +// +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. package storage diff --git a/vendor/github.com/open-policy-agent/opa/storage/errors.go b/vendor/github.com/open-policy-agent/opa/storage/errors.go index 8c789052ed4..1403b3a9887 100644 --- a/vendor/github.com/open-policy-agent/opa/storage/errors.go +++ b/vendor/github.com/open-policy-agent/opa/storage/errors.go @@ -5,118 +5,69 @@ package storage import ( - "fmt" + v1 "github.com/open-policy-agent/opa/v1/storage" ) const ( // InternalErr indicates an unknown, internal error has occurred. - InternalErr = "storage_internal_error" + InternalErr = v1.InternalErr // NotFoundErr indicates the path used in the storage operation does not // locate a document. - NotFoundErr = "storage_not_found_error" + NotFoundErr = v1.NotFoundErr // WriteConflictErr indicates a write on the path encountered a conflicting // value inside the transaction.
- WriteConflictErr = "storage_write_conflict_error" + WriteConflictErr = v1.WriteConflictErr // InvalidPatchErr indicates an invalid patch/write was issued. The patch // was rejected. - InvalidPatchErr = "storage_invalid_patch_error" + InvalidPatchErr = v1.InvalidPatchErr // InvalidTransactionErr indicates an invalid operation was performed // inside of the transaction. - InvalidTransactionErr = "storage_invalid_txn_error" + InvalidTransactionErr = v1.InvalidTransactionErr // TriggersNotSupportedErr indicates the caller attempted to register a // trigger against a store that does not support them. - TriggersNotSupportedErr = "storage_triggers_not_supported_error" + TriggersNotSupportedErr = v1.TriggersNotSupportedErr // WritesNotSupportedErr indicates the caller attempted to perform a write // against a store that does not support them. - WritesNotSupportedErr = "storage_writes_not_supported_error" + WritesNotSupportedErr = v1.WritesNotSupportedErr // PolicyNotSupportedErr indicates the caller attempted to perform a policy // management operation against a store that does not support them. - PolicyNotSupportedErr = "storage_policy_not_supported_error" + PolicyNotSupportedErr = v1.PolicyNotSupportedErr ) // Error is the error type returned by the storage layer. -type Error struct { - Code string `json:"code"` - Message string `json:"message"` -} - -func (err *Error) Error() string { - if err.Message != "" { - return fmt.Sprintf("%v: %v", err.Code, err.Message) - } - return err.Code -} +type Error = v1.Error // IsNotFound returns true if this error is a NotFoundErr. func IsNotFound(err error) bool { - switch err := err.(type) { - case *Error: - return err.Code == NotFoundErr - } - return false + return v1.IsNotFound(err) } // IsWriteConflictError returns true if this error is a WriteConflictErr. func IsWriteConflictError(err error) bool { - switch err := err.(type) { - case *Error: - return err.Code == WriteConflictErr - } - return false + return v1.IsWriteConflictError(err) } // IsInvalidPatch returns true if this error is an InvalidPatchErr. func IsInvalidPatch(err error) bool { - switch err := err.(type) { - case *Error: - return err.Code == InvalidPatchErr - } - return false + return v1.IsInvalidPatch(err) } // IsInvalidTransaction returns true if this error is an InvalidTransactionErr. func IsInvalidTransaction(err error) bool { - switch err := err.(type) { - case *Error: - return err.Code == InvalidTransactionErr - } - return false + return v1.IsInvalidTransaction(err) } // IsIndexingNotSupported is a stub for backwards-compatibility. // // Deprecated: We no longer return IndexingNotSupported errors, so it is // unnecessary to check for them.
-func IsIndexingNotSupported(error) bool { return false } - -func writeConflictError(path Path) *Error { - return &Error{ - Code: WriteConflictErr, - Message: path.String(), - } -} - -func triggersNotSupportedError() *Error { - return &Error{ - Code: TriggersNotSupportedErr, - } -} - -func writesNotSupportedError() *Error { - return &Error{ - Code: WritesNotSupportedErr, - } -} - -func policyNotSupportedError() *Error { - return &Error{ - Code: PolicyNotSupportedErr, - } +func IsIndexingNotSupported(err error) bool { + return v1.IsIndexingNotSupported(err) } diff --git a/vendor/github.com/open-policy-agent/opa/storage/interface.go b/vendor/github.com/open-policy-agent/opa/storage/interface.go index 6baca9a59f1..0192c459c83 100644 --- a/vendor/github.com/open-policy-agent/opa/storage/interface.go +++ b/vendor/github.com/open-policy-agent/opa/storage/interface.go @@ -5,243 +5,82 @@ package storage import ( - "context" - - "github.com/open-policy-agent/opa/metrics" + v1 "github.com/open-policy-agent/opa/v1/storage" ) // Transaction defines the interface that identifies a consistent snapshot over // the policy engine's storage layer. -type Transaction interface { - ID() uint64 -} +type Transaction = v1.Transaction // Store defines the interface for the storage layer's backend. -type Store interface { - Trigger - Policy - - // NewTransaction is called create a new transaction in the store. - NewTransaction(context.Context, ...TransactionParams) (Transaction, error) - - // Read is called to fetch a document referred to by path. - Read(context.Context, Transaction, Path) (interface{}, error) - - // Write is called to modify a document referred to by path. - Write(context.Context, Transaction, PatchOp, Path, interface{}) error - - // Commit is called to finish the transaction. If Commit returns an error, the - // transaction must be automatically aborted by the Store implementation. - Commit(context.Context, Transaction) error - - // Truncate is called to make a copy of the underlying store, write documents in the new store - // by creating multiple transactions in the new store as needed and finally swapping - // over to the new storage instance. This method must be called within a transaction on the original store. - Truncate(context.Context, Transaction, TransactionParams, Iterator) error - - // Abort is called to cancel the transaction. - Abort(context.Context, Transaction) -} +type Store = v1.Store // MakeDirer defines the interface a Store could realize to override the // generic MakeDir functionality in storage.MakeDir -type MakeDirer interface { - MakeDir(context.Context, Transaction, Path) error -} +type MakeDirer = v1.MakeDirer // TransactionParams describes a new transaction. -type TransactionParams struct { - - // BasePaths indicates the top-level paths where write operations will be performed in this transaction. - BasePaths []string - - // RootOverwrite is deprecated. Use BasePaths instead. - RootOverwrite bool - - // Write indicates if this transaction will perform any write operations. - Write bool - - // Context contains key/value pairs passed to triggers. - Context *Context -} +type TransactionParams = v1.TransactionParams // Context is a simple container for key/value pairs. -type Context struct { - values map[interface{}]interface{} -} +type Context = v1.Context // NewContext returns a new context object. func NewContext() *Context { - return &Context{ - values: map[interface{}]interface{}{}, - } -} - -// Get returns the key value in the context. 
-func (ctx *Context) Get(key interface{}) interface{} { - if ctx == nil { - return nil - } - return ctx.values[key] -} - -// Put adds a key/value pair to the context. -func (ctx *Context) Put(key, value interface{}) { - ctx.values[key] = value -} - -var metricsKey = struct{}{} - -// WithMetrics allows passing metrics via the Context. -// It puts the metrics object in the ctx, and returns the same -// ctx (not a copy) for convenience. -func (ctx *Context) WithMetrics(m metrics.Metrics) *Context { - ctx.values[metricsKey] = m - return ctx -} - -// Metrics() allows using a Context's metrics. Returns nil if metrics -// were not attached to the Context. -func (ctx *Context) Metrics() metrics.Metrics { - if m, ok := ctx.values[metricsKey]; ok { - if met, ok := m.(metrics.Metrics); ok { - return met - } - } - return nil + return v1.NewContext() } // WriteParams specifies the TransactionParams for a write transaction. -var WriteParams = TransactionParams{ - Write: true, -} +var WriteParams = v1.WriteParams // PatchOp is the enumeration of supported modifications. -type PatchOp int +type PatchOp = v1.PatchOp // Patch supports add, remove, and replace operations. const ( - AddOp PatchOp = iota - RemoveOp = iota - ReplaceOp = iota + AddOp = v1.AddOp + RemoveOp = v1.RemoveOp + ReplaceOp = v1.ReplaceOp ) // WritesNotSupported provides a default implementation of the write // interface which may be used if the backend does not support writes. -type WritesNotSupported struct{} - -func (WritesNotSupported) Write(context.Context, Transaction, PatchOp, Path, interface{}) error { - return writesNotSupportedError() -} +type WritesNotSupported = v1.WritesNotSupported // Policy defines the interface for policy module storage. -type Policy interface { - ListPolicies(context.Context, Transaction) ([]string, error) - GetPolicy(context.Context, Transaction, string) ([]byte, error) - UpsertPolicy(context.Context, Transaction, string, []byte) error - DeletePolicy(context.Context, Transaction, string) error -} +type Policy = v1.Policy // PolicyNotSupported provides a default implementation of the policy interface // which may be used if the backend does not support policy storage. -type PolicyNotSupported struct{} - -// ListPolicies always returns a PolicyNotSupportedErr. -func (PolicyNotSupported) ListPolicies(context.Context, Transaction) ([]string, error) { - return nil, policyNotSupportedError() -} - -// GetPolicy always returns a PolicyNotSupportedErr. -func (PolicyNotSupported) GetPolicy(context.Context, Transaction, string) ([]byte, error) { - return nil, policyNotSupportedError() -} - -// UpsertPolicy always returns a PolicyNotSupportedErr. -func (PolicyNotSupported) UpsertPolicy(context.Context, Transaction, string, []byte) error { - return policyNotSupportedError() -} - -// DeletePolicy always returns a PolicyNotSupportedErr. -func (PolicyNotSupported) DeletePolicy(context.Context, Transaction, string) error { - return policyNotSupportedError() -} +type PolicyNotSupported = v1.PolicyNotSupported // PolicyEvent describes a change to a policy. -type PolicyEvent struct { - ID string - Data []byte - Removed bool -} +type PolicyEvent = v1.PolicyEvent // DataEvent describes a change to a base data document. -type DataEvent struct { - Path Path - Data interface{} - Removed bool -} +type DataEvent = v1.DataEvent // TriggerEvent describes the changes that caused the trigger to be invoked.
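Because WritesNotSupported, PolicyNotSupported, and (just below) TriggersNotSupported are now aliases rather than local structs, embedding them in a custom backend keeps compiling. A compile-only sketch; the remaining Store methods are omitted, so this is not a full implementation:

package main

import "github.com/open-policy-agent/opa/storage"

// readOnlyStore rejects writes, policy management, and trigger registration
// by embedding the aliased helper types.
type readOnlyStore struct {
	storage.WritesNotSupported
	storage.PolicyNotSupported
	storage.TriggersNotSupported
	// Read, NewTransaction, Commit, Abort, and Truncate would complete storage.Store.
}

func main() {
	var s readOnlyStore
	err := s.Write(nil, nil, storage.AddOp, nil, nil)
	_ = err // the writes-not-supported error, same error code as before the bump
}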
-type TriggerEvent struct { - Policy []PolicyEvent - Data []DataEvent - Context *Context -} - -// IsZero returns true if the TriggerEvent indicates no changes occurred. This -// function is primarily for test purposes. -func (e TriggerEvent) IsZero() bool { - return !e.PolicyChanged() && !e.DataChanged() -} - -// PolicyChanged returns true if the trigger was caused by a policy change. -func (e TriggerEvent) PolicyChanged() bool { - return len(e.Policy) > 0 -} - -// DataChanged returns true if the trigger was caused by a data change. -func (e TriggerEvent) DataChanged() bool { - return len(e.Data) > 0 -} +type TriggerEvent = v1.TriggerEvent // TriggerConfig contains the trigger registration configuration. -type TriggerConfig struct { - - // OnCommit is invoked when a transaction is successfully committed. The - // callback is invoked with a handle to the write transaction that - // successfully committed before other clients see the changes. - OnCommit func(context.Context, Transaction, TriggerEvent) -} +type TriggerConfig = v1.TriggerConfig // Trigger defines the interface that stores implement to register for change // notifications when the store is changed. -type Trigger interface { - Register(context.Context, Transaction, TriggerConfig) (TriggerHandle, error) -} +type Trigger = v1.Trigger // TriggersNotSupported provides default implementations of the Trigger // interface which may be used if the backend does not support triggers. -type TriggersNotSupported struct{} - -// Register always returns an error indicating triggers are not supported. -func (TriggersNotSupported) Register(context.Context, Transaction, TriggerConfig) (TriggerHandle, error) { - return nil, triggersNotSupportedError() -} +type TriggersNotSupported = v1.TriggersNotSupported // TriggerHandle defines the interface that can be used to unregister triggers that have // been registered on a Store. -type TriggerHandle interface { - Unregister(context.Context, Transaction) -} +type TriggerHandle = v1.TriggerHandle // Iterator defines the interface that can be used to read files from a directory starting with // files at the base of the directory, then sub-directories etc. -type Iterator interface { - Next() (*Update, error) -} +type Iterator = v1.Iterator // Update contains information about a file -type Update struct { - Path Path - Value []byte - IsPolicy bool -} +type Update = v1.Update diff --git a/vendor/github.com/open-policy-agent/opa/storage/path.go b/vendor/github.com/open-policy-agent/opa/storage/path.go index 02ef4cab408..91d4f34f2bc 100644 --- a/vendor/github.com/open-policy-agent/opa/storage/path.go +++ b/vendor/github.com/open-policy-agent/opa/storage/path.go @@ -5,150 +5,30 @@ package storage import ( - "fmt" - "net/url" - "strconv" - "strings" - "github.com/open-policy-agent/opa/ast" + v1 "github.com/open-policy-agent/opa/v1/storage" ) // Path refers to a document in storage. -type Path []string +type Path = v1.Path // ParsePath returns a new path for the given str. func ParsePath(str string) (path Path, ok bool) { - if len(str) == 0 { - return nil, false - } - if str[0] != '/' { - return nil, false - } - if len(str) == 1 { - return Path{}, true - } - parts := strings.Split(str[1:], "/") - return parts, true + return v1.ParsePath(str) } // ParsePathEscaped returns a new path for the given escaped str. 
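Trigger registration through the aliases above still works the v0 way. A sketch assuming the in-memory store from github.com/open-policy-agent/opa/storage/inmem, which is not part of this hunk:

package main

import (
	"context"

	"github.com/open-policy-agent/opa/storage"
	"github.com/open-policy-agent/opa/storage/inmem"
)

func main() {
	ctx := context.Background()
	store := inmem.New()

	// Registration requires a write transaction; TriggerConfig and
	// TriggerEvent resolve to the v1 types via the aliases above.
	txn, err := store.NewTransaction(ctx, storage.WriteParams)
	if err != nil {
		panic(err)
	}
	if _, err := store.Register(ctx, txn, storage.TriggerConfig{
		OnCommit: func(_ context.Context, _ storage.Transaction, ev storage.TriggerEvent) {
			_ = ev.DataChanged() // true when the commit changed base documents
		},
	}); err != nil {
		panic(err)
	}
	if err := store.Commit(ctx, txn); err != nil {
		panic(err)
	}
}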
func ParsePathEscaped(str string) (path Path, ok bool) { - path, ok = ParsePath(str) - if !ok { - return - } - for i := range path { - segment, err := url.PathUnescape(path[i]) - if err == nil { - path[i] = segment - } - } - return + return v1.ParsePathEscaped(str) } // NewPathForRef returns a new path for the given ref. func NewPathForRef(ref ast.Ref) (path Path, err error) { - - if len(ref) == 0 { - return nil, fmt.Errorf("empty reference (indicates error in caller)") - } - - if len(ref) == 1 { - return Path{}, nil - } - - path = make(Path, 0, len(ref)-1) - - for _, term := range ref[1:] { - switch v := term.Value.(type) { - case ast.String: - path = append(path, string(v)) - case ast.Number: - path = append(path, v.String()) - case ast.Boolean, ast.Null: - return nil, &Error{ - Code: NotFoundErr, - Message: fmt.Sprintf("%v: does not exist", ref), - } - case *ast.Array, ast.Object, ast.Set: - return nil, fmt.Errorf("composites cannot be base document keys: %v", ref) - default: - return nil, fmt.Errorf("unresolved reference (indicates error in caller): %v", ref) - } - } - - return path, nil -} - -// Compare performs lexigraphical comparison on p and other and returns -1 if p -// is less than other, 0 if p is equal to other, or 1 if p is greater than -// other. -func (p Path) Compare(other Path) (cmp int) { - min := len(p) - if len(other) < min { - min = len(other) - } - for i := 0; i < min; i++ { - if cmp := strings.Compare(p[i], other[i]); cmp != 0 { - return cmp - } - } - if len(p) < len(other) { - return -1 - } - if len(p) == len(other) { - return 0 - } - return 1 -} - -// Equal returns true if p is the same as other. -func (p Path) Equal(other Path) bool { - return p.Compare(other) == 0 -} - -// HasPrefix returns true if p starts with other. -func (p Path) HasPrefix(other Path) bool { - if len(other) > len(p) { - return false - } - for i := range other { - if p[i] != other[i] { - return false - } - } - return true -} - -// Ref returns a ref that represents p rooted at head. -func (p Path) Ref(head *ast.Term) (ref ast.Ref) { - ref = make(ast.Ref, len(p)+1) - ref[0] = head - for i := range p { - idx, err := strconv.ParseInt(p[i], 10, 64) - if err == nil { - ref[i+1] = ast.UIntNumberTerm(uint64(idx)) - } else { - ref[i+1] = ast.StringTerm(p[i]) - } - } - return ref -} - -func (p Path) String() string { - buf := make([]string, len(p)) - for i := range buf { - buf[i] = url.PathEscape(p[i]) - } - return "/" + strings.Join(buf, "/") + return v1.NewPathForRef(ref) } // MustParsePath returns a new Path for s. If s cannot be parsed, this function // will panic. This is mostly for test purposes. func MustParsePath(s string) Path { - path, ok := ParsePath(s) - if !ok { - panic(s) - } - return path + return v1.MustParsePath(s) } diff --git a/vendor/github.com/open-policy-agent/opa/storage/storage.go b/vendor/github.com/open-policy-agent/opa/storage/storage.go index 2f8a39c597c..c02773d9851 100644 --- a/vendor/github.com/open-policy-agent/opa/storage/storage.go +++ b/vendor/github.com/open-policy-agent/opa/storage/storage.go @@ -7,85 +7,34 @@ package storage import ( "context" - "github.com/open-policy-agent/opa/ast" + v1 "github.com/open-policy-agent/opa/v1/storage" ) // NewTransactionOrDie is a helper function to create a new transaction. If the // storage layer cannot create a new transaction, this function will panic. This // function should only be used for tests. 
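The forwarding wrappers above keep the old parsing semantics; a quick sketch:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/storage"
)

func main() {
	p, ok := storage.ParsePath("/servers/main")
	fmt.Println(ok, p)      // true [servers main]
	fmt.Println(p.String()) // /servers/main (segments are path-escaped on output)

	// MustParsePath still panics on input without a leading slash;
	// "/" parses to the empty (root) path.
	root := storage.MustParsePath("/")
	fmt.Println(len(root)) // 0
}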
func NewTransactionOrDie(ctx context.Context, store Store, params ...TransactionParams) Transaction { - txn, err := store.NewTransaction(ctx, params...) - if err != nil { - panic(err) - } - return txn + return v1.NewTransactionOrDie(ctx, store, params...) } // ReadOne is a convenience function to read a single value from the provided Store. It // will create a new Transaction to perform the read with, and clean up after itself // should an error occur. func ReadOne(ctx context.Context, store Store, path Path) (interface{}, error) { - txn, err := store.NewTransaction(ctx) - if err != nil { - return nil, err - } - defer store.Abort(ctx, txn) - - return store.Read(ctx, txn, path) + return v1.ReadOne(ctx, store, path) } // WriteOne is a convenience function to write a single value to the provided Store. It // will create a new Transaction to perform the write with, and clean up after itself // should an error occur. func WriteOne(ctx context.Context, store Store, op PatchOp, path Path, value interface{}) error { - txn, err := store.NewTransaction(ctx, WriteParams) - if err != nil { - return err - } - - if err := store.Write(ctx, txn, op, path, value); err != nil { - store.Abort(ctx, txn) - return err - } - - return store.Commit(ctx, txn) + return v1.WriteOne(ctx, store, op, path, value) } // MakeDir inserts an empty object at path. If the parent path does not exist, // MakeDir will create it recursively. func MakeDir(ctx context.Context, store Store, txn Transaction, path Path) error { - - // Allow the Store implementation to deal with this in its own way. - if md, ok := store.(MakeDirer); ok { - return md.MakeDir(ctx, txn, path) - } - - if len(path) == 0 { - return nil - } - - node, err := store.Read(ctx, txn, path) - if err != nil { - if !IsNotFound(err) { - return err - } - - if err := MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil { - return err - } - - return store.Write(ctx, txn, AddOp, path, map[string]interface{}{}) - } - - if _, ok := node.(map[string]interface{}); ok { - return nil - } - - if _, ok := node.(ast.Object); ok { - return nil - } - - return writeConflictError(path) + return v1.MakeDir(ctx, store, txn, path) } // Txn is a convenience function that executes f inside a new transaction @@ -93,44 +42,12 @@ func MakeDir(ctx context.Context, store Store, txn Transaction, path Path) error // aborted and the error is returned. Otherwise, the transaction is committed // and the result of the commit is returned. func Txn(ctx context.Context, store Store, params TransactionParams, f func(Transaction) error) error { - - txn, err := store.NewTransaction(ctx, params) - if err != nil { - return err - } - - if err := f(txn); err != nil { - store.Abort(ctx, txn) - return err - } - - return store.Commit(ctx, txn) + return v1.Txn(ctx, store, params, f) } // NonEmpty returns a function that tests if a path is non-empty. A // path is non-empty if a Read on the path returns a value or a Read // on any of the path prefixes returns a non-object value. 
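The convenience helpers above delegate to v1 but keep their transaction-handling contracts. A sketch, again assuming the inmem store (not part of this hunk):

package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/storage"
	"github.com/open-policy-agent/opa/storage/inmem"
)

func main() {
	ctx := context.Background()
	store := inmem.New()

	// WriteOne opens a write transaction, writes, and commits in one call.
	if err := storage.WriteOne(ctx, store, storage.AddOp,
		storage.MustParsePath("/a"), map[string]interface{}{"b": 1}); err != nil {
		panic(err)
	}

	// ReadOne opens and aborts a read transaction around a single Read.
	v, err := storage.ReadOne(ctx, store, storage.MustParsePath("/a/b"))
	fmt.Println(v, err) // 1 <nil>
}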
func NonEmpty(ctx context.Context, store Store, txn Transaction) func([]string) (bool, error) { - return func(path []string) (bool, error) { - if _, err := store.Read(ctx, txn, Path(path)); err == nil { - return true, nil - } else if !IsNotFound(err) { - return false, err - } - for i := len(path) - 1; i > 0; i-- { - val, err := store.Read(ctx, txn, Path(path[:i])) - if err != nil && !IsNotFound(err) { - return false, err - } else if err == nil { - if _, ok := val.(map[string]interface{}); ok { - return false, nil - } - if _, ok := val.(ast.Object); ok { - return false, nil - } - return true, nil - } - } - return false, nil - } + return v1.NonEmpty(ctx, store, txn) } diff --git a/vendor/github.com/open-policy-agent/opa/topdown/print/doc.go b/vendor/github.com/open-policy-agent/opa/topdown/print/doc.go new file mode 100644 index 00000000000..c2ee0eca7fd --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/topdown/print/doc.go @@ -0,0 +1,8 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. +package print diff --git a/vendor/github.com/open-policy-agent/opa/topdown/print/print.go b/vendor/github.com/open-policy-agent/opa/topdown/print/print.go index 0fb6abdca8b..66ffbb176f4 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/print/print.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/print/print.go @@ -1,21 +1,14 @@ package print import ( - "context" - - "github.com/open-policy-agent/opa/ast" + v1 "github.com/open-policy-agent/opa/v1/topdown/print" ) // Context provides the Hook implementation context about the print() call. -type Context struct { - Context context.Context // request context passed when query executed - Location *ast.Location // location of print call -} +type Context = v1.Context // Hook defines the interface that callers can implement to receive print // statement outputs. If the hook returns an error, it will be surfaced if // strict builtin error checking is enabled (otherwise, it will not halt // execution.) -type Hook interface { - Print(Context, string) error -} +type Hook = v1.Hook diff --git a/vendor/github.com/open-policy-agent/opa/topdown/type_name.go b/vendor/github.com/open-policy-agent/opa/topdown/type_name.go deleted file mode 100644 index 0a8b44aed3f..00000000000 --- a/vendor/github.com/open-policy-agent/opa/topdown/type_name.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2018 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. 
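Hook and Context in the print package are now aliases, so a v0-style print hook still satisfies the interface. A minimal sketch:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/topdown/print"
)

// stdoutHook receives the output of print() calls during evaluation.
type stdoutHook struct{}

func (stdoutHook) Print(pctx print.Context, msg string) error {
	fmt.Printf("%v: %s\n", pctx.Location, msg) // Location is the print() call site
	return nil
}

func main() {
	var _ print.Hook = stdoutHook{} // compile-time check against the aliased interface
}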
- -package topdown - -import ( - "fmt" - - "github.com/open-policy-agent/opa/ast" -) - -func builtinTypeName(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - switch operands[0].Value.(type) { - case ast.Null: - return iter(ast.StringTerm("null")) - case ast.Boolean: - return iter(ast.StringTerm("boolean")) - case ast.Number: - return iter(ast.StringTerm("number")) - case ast.String: - return iter(ast.StringTerm("string")) - case *ast.Array: - return iter(ast.StringTerm("array")) - case ast.Object: - return iter(ast.StringTerm("object")) - case ast.Set: - return iter(ast.StringTerm("set")) - } - - return fmt.Errorf("illegal value") -} - -func init() { - RegisterBuiltinFunc(ast.TypeNameBuiltin.Name, builtinTypeName) -} diff --git a/vendor/github.com/open-policy-agent/opa/types/decode.go b/vendor/github.com/open-policy-agent/opa/types/decode.go index a6bd9ea030b..ae04b38ff4e 100644 --- a/vendor/github.com/open-policy-agent/opa/types/decode.go +++ b/vendor/github.com/open-policy-agent/opa/types/decode.go @@ -5,187 +5,10 @@ package types import ( - "encoding/json" - "fmt" - - "github.com/open-policy-agent/opa/util" -) - -const ( - typeNull = "null" - typeBoolean = "boolean" - typeNumber = "number" - typeString = "string" - typeArray = "array" - typeSet = "set" - typeObject = "object" - typeAny = "any" - typeFunction = "function" + v1 "github.com/open-policy-agent/opa/v1/types" ) // Unmarshal deserializes bs and returns the resulting type. func Unmarshal(bs []byte) (result Type, err error) { - - var hint rawtype - - if err = util.UnmarshalJSON(bs, &hint); err == nil { - switch hint.Type { - case typeNull: - result = NewNull() - case typeBoolean: - result = NewBoolean() - case typeNumber: - result = NewNumber() - case typeString: - result = NewString() - case typeArray: - var arr rawarray - if err = util.UnmarshalJSON(bs, &arr); err == nil { - var err error - var static []Type - var dynamic Type - if static, err = unmarshalSlice(arr.Static); err != nil { - return nil, err - } - if len(arr.Dynamic) != 0 { - if dynamic, err = Unmarshal(arr.Dynamic); err != nil { - return nil, err - } - } - result = NewArray(static, dynamic) - } - case typeObject: - var obj rawobject - if err = util.UnmarshalJSON(bs, &obj); err == nil { - var err error - var static []*StaticProperty - var dynamic *DynamicProperty - if static, err = unmarshalStaticPropertySlice(obj.Static); err != nil { - return nil, err - } - if dynamic, err = unmarshalDynamicProperty(obj.Dynamic); err != nil { - return nil, err - } - result = NewObject(static, dynamic) - } - case typeSet: - var set rawset - if err = util.UnmarshalJSON(bs, &set); err == nil { - var of Type - if of, err = Unmarshal(set.Of); err == nil { - result = NewSet(of) - } - } - case typeAny: - var union rawunion - if err = util.UnmarshalJSON(bs, &union); err == nil { - var of []Type - if of, err = unmarshalSlice(union.Of); err == nil { - result = NewAny(of...) 
- } - } - case typeFunction: - var decl rawdecl - if err = util.UnmarshalJSON(bs, &decl); err == nil { - args, err := unmarshalSlice(decl.Args) - if err != nil { - return nil, err - } - var ret Type - if len(decl.Result) > 0 { - ret, err = Unmarshal(decl.Result) - if err != nil { - return nil, err - } - } - if len(decl.Variadic) > 0 { - varargs, err := Unmarshal(decl.Variadic) - if err != nil { - return nil, err - } - result = NewVariadicFunction(args, varargs, ret) - } else { - result = NewFunction(args, ret) - } - } - default: - err = fmt.Errorf("unsupported type '%v'", hint.Type) - } - } - - return result, err -} - -type rawtype struct { - Type string `json:"type"` -} - -type rawarray struct { - Static []json.RawMessage `json:"static"` - Dynamic json.RawMessage `json:"dynamic"` -} - -type rawobject struct { - Static []rawstaticproperty `json:"static"` - Dynamic rawdynamicproperty `json:"dynamic"` -} - -type rawstaticproperty struct { - Key interface{} `json:"key"` - Value json.RawMessage `json:"value"` -} - -type rawdynamicproperty struct { - Key json.RawMessage `json:"key"` - Value json.RawMessage `json:"value"` -} - -type rawset struct { - Of json.RawMessage `json:"of"` -} - -type rawunion struct { - Of []json.RawMessage `json:"of"` -} - -type rawdecl struct { - Args []json.RawMessage `json:"args"` - Result json.RawMessage `json:"result"` - Variadic json.RawMessage `json:"variadic"` -} - -func unmarshalSlice(elems []json.RawMessage) (result []Type, err error) { - result = make([]Type, len(elems)) - for i := range elems { - if result[i], err = Unmarshal(elems[i]); err != nil { - return nil, err - } - } - return result, err -} - -func unmarshalStaticPropertySlice(elems []rawstaticproperty) (result []*StaticProperty, err error) { - result = make([]*StaticProperty, len(elems)) - for i := range elems { - value, err := Unmarshal(elems[i].Value) - if err != nil { - return nil, err - } - result[i] = NewStaticProperty(elems[i].Key, value) - } - return result, err -} - -func unmarshalDynamicProperty(x rawdynamicproperty) (result *DynamicProperty, err error) { - if len(x.Key) == 0 { - return nil, nil - } - var key Type - if key, err = Unmarshal(x.Key); err == nil { - var value Type - if value, err = Unmarshal(x.Value); err == nil { - return NewDynamicProperty(key, value), nil - } - } - return nil, err + return v1.Unmarshal(bs) } diff --git a/vendor/github.com/open-policy-agent/opa/types/doc.go b/vendor/github.com/open-policy-agent/opa/types/doc.go new file mode 100644 index 00000000000..bfa068e66b6 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/types/doc.go @@ -0,0 +1,8 @@ +// Copyright 2024 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. 
+package types diff --git a/vendor/github.com/open-policy-agent/opa/types/types.go b/vendor/github.com/open-policy-agent/opa/types/types.go index 2a050927dd0..b888b27b60e 100644 --- a/vendor/github.com/open-policy-agent/opa/types/types.go +++ b/vendor/github.com/open-policy-agent/opa/types/types.go @@ -7,1197 +7,194 @@ package types import ( - "encoding/json" - "fmt" - "sort" - "strings" - - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/types" ) // Sprint returns the string representation of the type. func Sprint(x Type) string { - if x == nil { - return "???" - } - return x.String() + return v1.Sprint(x) } // Type represents a type of a term in the language. -type Type interface { - String() string - typeMarker() string - json.Marshaler -} - -func (Null) typeMarker() string { return typeNull } -func (Boolean) typeMarker() string { return typeBoolean } -func (Number) typeMarker() string { return typeNumber } -func (String) typeMarker() string { return typeString } -func (*Array) typeMarker() string { return typeArray } -func (*Object) typeMarker() string { return typeObject } -func (*Set) typeMarker() string { return typeSet } -func (Any) typeMarker() string { return typeAny } -func (Function) typeMarker() string { return typeFunction } +type Type = v1.Type // Null represents the null type. -type Null struct{} +type Null = v1.Null // NewNull returns a new Null type. func NewNull() Null { - return Null{} + return v1.NewNull() } // NamedType represents a type alias with an arbitrary name and description. // This is useful for generating documentation for built-in functions. -type NamedType struct { - Name, Descr string - Type Type -} - -func (n *NamedType) typeMarker() string { return n.Type.typeMarker() } -func (n *NamedType) String() string { return n.Name + ": " + n.Type.String() } -func (n *NamedType) MarshalJSON() ([]byte, error) { - var obj map[string]interface{} - switch x := n.Type.(type) { - case interface{ toMap() map[string]interface{} }: - obj = x.toMap() - default: - obj = map[string]interface{}{ - "type": n.Type.typeMarker(), - } - } - obj["name"] = n.Name - if n.Descr != "" { - obj["description"] = n.Descr - } - return json.Marshal(obj) -} - -func (n *NamedType) Description(d string) *NamedType { - n.Descr = d - return n -} +type NamedType = v1.NamedType // Named returns the passed type as a named type. // Named types are only valid at the top level of built-in functions. // Note that nested named types cause panic. func Named(name string, t Type) *NamedType { - return &NamedType{ - Type: t, - Name: name, - } -} - -// MarshalJSON returns the JSON encoding of t. -func (t Null) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ - "type": t.typeMarker(), - }) -} - -func unwrap(t Type) Type { - switch t := t.(type) { - case *NamedType: - return t.Type - default: - return t - } -} - -func (t Null) String() string { - return typeNull + return v1.Named(name, t) } // Boolean represents the boolean type. -type Boolean struct{} +type Boolean = v1.Boolean // B represents an instance of the boolean type. var B = NewBoolean() // NewBoolean returns a new Boolean type. func NewBoolean() Boolean { - return Boolean{} -} - -// MarshalJSON returns the JSON encoding of t. 
-func (t Boolean) MarshalJSON() ([]byte, error) { - repr := map[string]interface{}{ - "type": t.typeMarker(), - } - return json.Marshal(repr) -} - -func (t Boolean) String() string { - return t.typeMarker() + return v1.NewBoolean() } // String represents the string type. -type String struct{} +type String = v1.String // S represents an instance of the string type. var S = NewString() // NewString returns a new String type. func NewString() String { - return String{} -} - -// MarshalJSON returns the JSON encoding of t. -func (t String) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ - "type": t.typeMarker(), - }) -} - -func (String) String() string { - return typeString + return v1.NewString() } // Number represents the number type. -type Number struct{} +type Number = v1.Number // N represents an instance of the number type. var N = NewNumber() // NewNumber returns a new Number type. func NewNumber() Number { - return Number{} -} - -// MarshalJSON returns the JSON encoding of t. -func (t Number) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ - "type": t.typeMarker(), - }) -} - -func (Number) String() string { - return typeNumber + return v1.NewNumber() } // Array represents the array type. -type Array struct { - static []Type // static items - dynamic Type // dynamic items -} +type Array = v1.Array // NewArray returns a new Array type. func NewArray(static []Type, dynamic Type) *Array { - return &Array{ - static: static, - dynamic: dynamic, - } -} - -// MarshalJSON returns the JSON encoding of t. -func (t *Array) MarshalJSON() ([]byte, error) { - return json.Marshal(t.toMap()) -} - -func (t *Array) toMap() map[string]interface{} { - repr := map[string]interface{}{ - "type": t.typeMarker(), - } - if len(t.static) != 0 { - repr["static"] = t.static - } - if t.dynamic != nil { - repr["dynamic"] = t.dynamic - } - return repr -} - -func (t *Array) String() string { - prefix := "array" - buf := []string{} - for _, tpe := range t.static { - buf = append(buf, Sprint(tpe)) - } - repr := prefix - if len(buf) > 0 { - repr += "<" + strings.Join(buf, ", ") + ">" - } - if t.dynamic != nil { - repr += "[" + t.dynamic.String() + "]" - } - return repr -} - -// Dynamic returns the type of the array's dynamic elements. -func (t *Array) Dynamic() Type { - return t.dynamic -} - -// Len returns the number of static array elements. -func (t *Array) Len() int { - return len(t.static) -} - -// Select returns the type of element at the zero-based pos. -func (t *Array) Select(pos int) Type { - if pos >= 0 { - if len(t.static) > pos { - return t.static[pos] - } - if t.dynamic != nil { - return t.dynamic - } - } - return nil + return v1.NewArray(static, dynamic) } // Set represents the set type. -type Set struct { - of Type -} +type Set = v1.Set // NewSet returns a new Set type. func NewSet(of Type) *Set { - return &Set{ - of: of, - } -} - -func (t *Set) Of() Type { - return t.of -} - -// MarshalJSON returns the JSON encoding of t. -func (t *Set) MarshalJSON() ([]byte, error) { - return json.Marshal(t.toMap()) -} - -func (t *Set) toMap() map[string]interface{} { - repr := map[string]interface{}{ - "type": t.typeMarker(), - } - if t.of != nil { - repr["of"] = t.of - } - return repr -} - -func (t *Set) String() string { - prefix := typeSet - return prefix + "[" + Sprint(t.of) + "]" + return v1.NewSet(of) } // StaticProperty represents a static object property. 
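A round-trip sketch tying the aliased constructors above to Unmarshal from decode.go; the printed form follows the String implementations this file used to carry, now in v1:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/open-policy-agent/opa/types"
)

func main() {
	// array<string>[number]: one static string element, dynamic number elements.
	t := types.NewArray([]types.Type{types.S}, types.N)

	bs, err := json.Marshal(t) // Type still implements json.Marshaler via v1
	if err != nil {
		panic(err)
	}

	back, err := types.Unmarshal(bs)
	fmt.Println(types.Sprint(back), err) // array<string>[number] <nil>
}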
-type StaticProperty struct { - Key interface{} - Value Type -} +type StaticProperty = v1.StaticProperty // NewStaticProperty returns a new StaticProperty object. func NewStaticProperty(key interface{}, value Type) *StaticProperty { - return &StaticProperty{ - Key: key, - Value: value, - } -} - -// MarshalJSON returns the JSON encoding of p. -func (p *StaticProperty) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ - "key": p.Key, - "value": p.Value, - }) + return v1.NewStaticProperty(key, value) } // DynamicProperty represents a dynamic object property. -type DynamicProperty struct { - Key Type - Value Type -} +type DynamicProperty = v1.DynamicProperty // NewDynamicProperty returns a new DynamicProperty object. func NewDynamicProperty(key, value Type) *DynamicProperty { - return &DynamicProperty{ - Key: key, - Value: value, - } -} - -// MarshalJSON returns the JSON encoding of p. -func (p *DynamicProperty) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ - "key": p.Key, - "value": p.Value, - }) -} - -func (p *DynamicProperty) String() string { - return fmt.Sprintf("%s: %s", Sprint(p.Key), Sprint(p.Value)) + return v1.NewDynamicProperty(key, value) } // Object represents the object type. -type Object struct { - static []*StaticProperty // constant properties - dynamic *DynamicProperty // dynamic properties -} +type Object = v1.Object // NewObject returns a new Object type. func NewObject(static []*StaticProperty, dynamic *DynamicProperty) *Object { - sort.Slice(static, func(i, j int) bool { - cmp := util.Compare(static[i].Key, static[j].Key) - return cmp == -1 - }) - return &Object{ - static: static, - dynamic: dynamic, - } -} - -func (t *Object) String() string { - prefix := "object" - buf := make([]string, 0, len(t.static)) - for _, p := range t.static { - buf = append(buf, fmt.Sprintf("%v: %v", p.Key, Sprint(p.Value))) - } - repr := prefix - if len(buf) > 0 { - repr += "<" + strings.Join(buf, ", ") + ">" - } - if t.dynamic != nil { - repr += "[" + t.dynamic.String() + "]" - } - return repr -} - -// DynamicValue returns the type of the object's dynamic elements. -func (t *Object) DynamicValue() Type { - if t.dynamic == nil { - return nil - } - return t.dynamic.Value -} - -// DynamicProperties returns the type of the object's dynamic elements. -func (t *Object) DynamicProperties() *DynamicProperty { - return t.dynamic -} - -// StaticProperties returns the type of the object's static elements. -func (t *Object) StaticProperties() []*StaticProperty { - return t.static -} - -// Keys returns the keys of the object's static elements. -func (t *Object) Keys() []interface{} { - sl := make([]interface{}, 0, len(t.static)) - for _, p := range t.static { - sl = append(sl, p.Key) - } - return sl -} - -// MarshalJSON returns the JSON encoding of t. -func (t *Object) MarshalJSON() ([]byte, error) { - return json.Marshal(t.toMap()) -} - -func (t *Object) toMap() map[string]interface{} { - repr := map[string]interface{}{ - "type": t.typeMarker(), - } - if len(t.static) != 0 { - repr["static"] = t.static - } - if t.dynamic != nil { - repr["dynamic"] = t.dynamic - } - return repr -} - -// Select returns the type of the named property. 
-func (t *Object) Select(name interface{}) Type { - pos := sort.Search(len(t.static), func(x int) bool { - return util.Compare(t.static[x].Key, name) >= 0 - }) - - if pos < len(t.static) && util.Compare(t.static[pos].Key, name) == 0 { - return t.static[pos].Value - } - - if t.dynamic != nil { - if Contains(t.dynamic.Key, TypeOf(name)) { - return t.dynamic.Value - } - } - - return nil -} - -func (t *Object) Merge(other Type) *Object { - if otherObj, ok := other.(*Object); ok { - return mergeObjects(t, otherObj) - } - - var typeK Type - var typeV Type - dynProps := t.DynamicProperties() - if dynProps != nil { - typeK = Or(Keys(other), dynProps.Key) - typeV = Or(Values(other), dynProps.Value) - dynProps = NewDynamicProperty(typeK, typeV) - } else { - typeK = Keys(other) - typeV = Values(other) - if typeK != nil && typeV != nil { - dynProps = NewDynamicProperty(typeK, typeV) - } - } - - return NewObject(t.StaticProperties(), dynProps) -} - -func mergeObjects(a, b *Object) *Object { - var dynamicProps *DynamicProperty - if a.dynamic != nil && b.dynamic != nil { - typeK := Or(a.dynamic.Key, b.dynamic.Key) - var typeV Type - aObj, aIsObj := a.dynamic.Value.(*Object) - bObj, bIsObj := b.dynamic.Value.(*Object) - if aIsObj && bIsObj { - typeV = mergeObjects(aObj, bObj) - } else { - typeV = Or(a.dynamic.Value, b.dynamic.Value) - } - dynamicProps = NewDynamicProperty(typeK, typeV) - } else if a.dynamic != nil { - dynamicProps = a.dynamic - } else { - dynamicProps = b.dynamic - } - - staticPropsMap := make(map[interface{}]Type) - - for _, sp := range a.static { - staticPropsMap[sp.Key] = sp.Value - } - - for _, sp := range b.static { - currV := staticPropsMap[sp.Key] - if currV != nil { - currVObj, currVIsObj := currV.(*Object) - spVObj, spVIsObj := sp.Value.(*Object) - if currVIsObj && spVIsObj { - staticPropsMap[sp.Key] = mergeObjects(currVObj, spVObj) - } else { - staticPropsMap[sp.Key] = Or(currV, sp.Value) - } - } else { - staticPropsMap[sp.Key] = sp.Value - } - } - - staticProps := make([]*StaticProperty, 0, len(staticPropsMap)) - for k, v := range staticPropsMap { - staticProps = append(staticProps, NewStaticProperty(k, v)) - } - - return NewObject(staticProps, dynamicProps) + return v1.NewObject(static, dynamic) } // Any represents a dynamic type. -type Any []Type +type Any = v1.Any // A represents the superset of all types. var A = NewAny() // NewAny returns a new Any type. func NewAny(of ...Type) Any { - sl := make(Any, len(of)) - copy(sl, of) - sort.Sort(typeSlice(sl)) - return sl -} - -// Contains returns true if t is a superset of other. -func (t Any) Contains(other Type) bool { - if _, ok := other.(*Function); ok { - return false - } - // Note(philipc): We used to do this as a linear search. - // Since this is always sorted, we can use a binary search instead. - i := sort.Search(len(t), func(i int) bool { - return Compare(t[i], other) >= 0 - }) - if i < len(t) && Compare(t[i], other) == 0 { - // x is present at t[i] - return true - } - return len(t) == 0 -} - -// MarshalJSON returns the JSON encoding of t. -func (t Any) MarshalJSON() ([]byte, error) { - return json.Marshal(t.toMap()) -} - -func (t Any) toMap() map[string]interface{} { - repr := map[string]interface{}{ - "type": t.typeMarker(), - } - if len(t) != 0 { - repr["of"] = []Type(t) - } - return repr -} - -// Merge return a new Any type that is the superset of t and other. 
-func (t Any) Merge(other Type) Any { - if otherAny, ok := other.(Any); ok { - return t.Union(otherAny) - } - if t.Contains(other) { - return t - } - cpy := make(Any, len(t)+1) - idx := sort.Search(len(t), func(i int) bool { - return Compare(t[i], other) >= 0 - }) - copy(cpy, t[:idx]) - cpy[idx] = other - copy(cpy[idx+1:], t[idx:]) - return cpy -} - -// Union returns a new Any type that is the union of the two Any types. -// Note(philipc): The two Any slices MUST be sorted before running Union, -// or else this method will fail to merge the two slices correctly. -func (t Any) Union(other Any) Any { - lenT := len(t) - lenOther := len(other) - // Return the more general (blank) Any type if present. - if lenT == 0 { - return t - } - if lenOther == 0 { - return other - } - // Prealloc the output list. - maxLen := lenT - if lenT < lenOther { - maxLen = lenOther - } - merged := make(Any, 0, maxLen) - // Note(philipc): Create a merged slice, doing the minimum number of - // comparisons along the way. We treat this as a problem of merging two - // sorted lists that might have duplicates. This specifically saves us - // from cases where one list might be *much* longer than the other. - // Algorithm: - // Assume: - // - List A - // - List B - // - List Output - // - Idx_a, Idx_b - // Procedure: - // - While Idx_a < len(A) and Idx_b < len(B) - // - Compare head(A) and head(B) - // - Cases: - // - A < B: Append head(A) to Output, advance Idx_a - // - A == B: Append head(A) to Output, advance Idx_a, Idx_b - // - A > B: Append head(B) to Output, advance Idx_b - // - Return output - idxA := 0 - idxB := 0 - for idxA < lenT || idxB < lenOther { - // Early-exit cases: - if idxA == lenT { - // Ran out of elements in t. Copy over what's left from other. - merged = append(merged, other[idxB:]...) - break - } else if idxB == lenOther { - // Ran out of elements in other. Copy over what's left from t. - merged = append(merged, t[idxA:]...) - break - } - // Normal selection of next element to merge: - switch Compare(t[idxA], other[idxB]) { - // A < B: - case -1: - merged = append(merged, t[idxA]) - idxA++ - // A == B: - case 0: - merged = append(merged, t[idxA]) - idxA++ - idxB++ - // A > B: - case 1: - merged = append(merged, other[idxB]) - idxB++ - } - } - return merged -} - -func (t Any) String() string { - prefix := "any" - if len(t) == 0 { - return prefix - } - buf := make([]string, len(t)) - for i := range t { - buf[i] = Sprint(t[i]) - } - return prefix + "<" + strings.Join(buf, ", ") + ">" + return v1.NewAny(of...) } // Function represents a function type. -type Function struct { - args []Type - result Type - variadic Type -} +type Function = v1.Function // Args returns an argument list. func Args(x ...Type) []Type { - return x + return v1.Args(x...) } // Void returns true if the function has no return value. This function returns // false if x is not a function. func Void(x Type) bool { - f, ok := x.(*Function) - return ok && f.Result() == nil + return v1.Void(x) } // Arity returns the number of arguments in the function signature or zero if x // is not a function. If the type is unknown, this function returns -1. func Arity(x Type) int { - if x == nil { - return -1 - } - f, ok := x.(*Function) - if !ok { - return 0 - } - return len(f.FuncArgs().Args) + return v1.Arity(x) } // NewFunction returns a new Function object of the given argument and result types. 
func NewFunction(args []Type, result Type) *Function { - return &Function{ - args: args, - result: result, - } + return v1.NewFunction(args, result) } // NewVariadicFunction returns a new Function object. This function sets the // variadic bit on the signature. Non-void variadic functions are not currently // supported. func NewVariadicFunction(args []Type, varargs Type, result Type) *Function { - if result != nil { - panic("illegal value: non-void variadic functions not supported") - } - return &Function{ - args: args, - variadic: varargs, - result: nil, - } -} - -// FuncArgs returns the function's arguments. -func (t *Function) FuncArgs() FuncArgs { - return FuncArgs{Args: t.Args(), Variadic: unwrap(t.variadic)} -} - -// NamedFuncArgs returns the function's arguments, with a name and -// description if available. -func (t *Function) NamedFuncArgs() FuncArgs { - args := make([]Type, len(t.args)) - copy(args, t.args) - return FuncArgs{Args: args, Variadic: t.variadic} -} - -// Args returns the function's arguments as a slice, ignoring variadic arguments. -// Deprecated: Use FuncArgs instead. -func (t *Function) Args() []Type { - cpy := make([]Type, len(t.args)) - for i := range t.args { - cpy[i] = unwrap(t.args[i]) - } - return cpy -} - -// Result returns the function's result type. -func (t *Function) Result() Type { - return unwrap(t.result) -} - -// Result returns the function's result type, without stripping name and description. -func (t *Function) NamedResult() Type { - return t.result -} - -func (t *Function) String() string { - return fmt.Sprintf("%v => %v", t.FuncArgs(), Sprint(t.Result())) -} - -// MarshalJSON returns the JSON encoding of t. -func (t *Function) MarshalJSON() ([]byte, error) { - repr := map[string]interface{}{ - "type": t.typeMarker(), - } - if len(t.args) > 0 { - repr["args"] = t.args - } - if t.result != nil { - repr["result"] = t.result - } - if t.variadic != nil { - repr["variadic"] = t.variadic - } - return json.Marshal(repr) -} - -// UnmarshalJSON decodes the JSON serialized function declaration. -func (t *Function) UnmarshalJSON(bs []byte) error { - tpe, err := Unmarshal(bs) - if err != nil { - return err - } - - f, ok := tpe.(*Function) - if !ok { - return fmt.Errorf("invalid type") - } - - *t = *f - return nil -} - -// Union returns a new function representing the union of t and other. Functions -// must have the same arity to be unioned. -func (t *Function) Union(other *Function) *Function { - if other == nil { - return t - } - if t == nil { - return other - } - - a := t.Args() - b := other.Args() - if len(a) != len(b) { - return nil - } - - aIsVariadic := t.FuncArgs().Variadic != nil - bIsVariadic := other.FuncArgs().Variadic != nil - - if aIsVariadic && !bIsVariadic { - return nil - } else if bIsVariadic && !aIsVariadic { - return nil - } - - args := make([]Type, len(a)) - for i := range a { - args[i] = Or(a[i], b[i]) - } - - result := NewFunction(args, Or(t.Result(), other.Result())) - result.variadic = Or(t.FuncArgs().Variadic, other.FuncArgs().Variadic) - - return result + return v1.NewVariadicFunction(args, varargs, result) } // FuncArgs represents the arguments that can be passed to a function. 
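A sketch of the signature helpers above:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/types"
)

func main() {
	// A built-in signature: (string, number) => boolean.
	fn := types.NewFunction(types.Args(types.S, types.N), types.B)

	fmt.Println(types.Sprint(fn)) // (string, number) => boolean
	fmt.Println(types.Arity(fn))  // 2
	fmt.Println(types.Void(fn))   // false: the function has a result type
}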
-type FuncArgs struct { - Args []Type `json:"args,omitempty"` - Variadic Type `json:"variadic,omitempty"` -} - -func (a FuncArgs) String() string { - buf := make([]string, 0, len(a.Args)+1) - for i := range a.Args { - buf = append(buf, Sprint(a.Args[i])) - } - if a.Variadic != nil { - buf = append(buf, Sprint(a.Variadic)+"...") - } - return "(" + strings.Join(buf, ", ") + ")" -} - -// Arg returns the nth argument's type. -func (a FuncArgs) Arg(x int) Type { - if x < len(a.Args) { - return a.Args[x] - } - return a.Variadic -} +type FuncArgs = v1.FuncArgs // Compare returns -1, 0, 1 based on comparison between a and b. func Compare(a, b Type) int { - a, b = unwrap(a), unwrap(b) - x := typeOrder(a) - y := typeOrder(b) - if x > y { - return 1 - } else if x < y { - return -1 - } - switch a.(type) { - case nil, Null, Boolean, Number, String: - return 0 - case *Array: - arrA := a.(*Array) - arrB := b.(*Array) - if arrA.dynamic != nil && arrB.dynamic == nil { - return 1 - } else if arrB.dynamic != nil && arrA.dynamic == nil { - return -1 - } - if arrB.dynamic != nil && arrA.dynamic != nil { - if cmp := Compare(arrA.dynamic, arrB.dynamic); cmp != 0 { - return cmp - } - } - return typeSliceCompare(arrA.static, arrB.static) - case *Object: - objA := a.(*Object) - objB := b.(*Object) - if objA.dynamic != nil && objB.dynamic == nil { - return 1 - } else if objB.dynamic != nil && objA.dynamic == nil { - return -1 - } - if objA.dynamic != nil && objB.dynamic != nil { - if cmp := Compare(objA.dynamic.Key, objB.dynamic.Key); cmp != 0 { - return cmp - } - if cmp := Compare(objA.dynamic.Value, objB.dynamic.Value); cmp != 0 { - return cmp - } - } - - lenStaticA := len(objA.static) - lenStaticB := len(objB.static) - - minLen := lenStaticA - if lenStaticB < minLen { - minLen = lenStaticB - } - - for i := 0; i < minLen; i++ { - if cmp := util.Compare(objA.static[i].Key, objB.static[i].Key); cmp != 0 { - return cmp - } - if cmp := Compare(objA.static[i].Value, objB.static[i].Value); cmp != 0 { - return cmp - } - } - - if lenStaticA < lenStaticB { - return -1 - } else if lenStaticB < lenStaticA { - return 1 - } - - return 0 - case *Set: - setA := a.(*Set) - setB := b.(*Set) - if setA.of == nil && setB.of == nil { - return 0 - } else if setA.of == nil { - return -1 - } else if setB.of == nil { - return 1 - } - return Compare(setA.of, setB.of) - case Any: - sl1 := typeSlice(a.(Any)) - sl2 := typeSlice(b.(Any)) - return typeSliceCompare(sl1, sl2) - case *Function: - fA := a.(*Function) - fB := b.(*Function) - if len(fA.args) < len(fB.args) { - return -1 - } else if len(fA.args) > len(fB.args) { - return 1 - } - for i := 0; i < len(fA.args); i++ { - if cmp := Compare(fA.args[i], fB.args[i]); cmp != 0 { - return cmp - } - } - if cmp := Compare(fA.result, fB.result); cmp != 0 { - return cmp - } - return Compare(fA.variadic, fB.variadic) - default: - panic("unreachable") - } + return v1.Compare(a, b) } // Contains returns true if a is a superset or equal to b. func Contains(a, b Type) bool { - if x, ok := unwrap(a).(Any); ok { - return x.Contains(b) - } - return Compare(a, b) == 0 + return v1.Contains(a, b) } // Or returns a type that represents the union of a and b. If one type is a // superset of the other, the superset is returned unchanged. 
func Or(a, b Type) Type { - a, b = unwrap(a), unwrap(b) - if a == nil { - return b - } else if b == nil { - return a - } - fA, ok1 := a.(*Function) - fB, ok2 := b.(*Function) - if ok1 && ok2 { - return fA.Union(fB) - } else if ok1 || ok2 { - return nil - } - anyA, ok1 := a.(Any) - anyB, ok2 := b.(Any) - if ok1 { - return anyA.Merge(b) - } - if ok2 { - return anyB.Merge(a) - } - if Compare(a, b) == 0 { - return a - } - return NewAny(a, b) + return v1.Or(a, b) } // Select returns a property or item of a. func Select(a Type, x interface{}) Type { - switch a := unwrap(a).(type) { - case *Array: - n, ok := x.(json.Number) - if !ok { - return nil - } - pos, err := n.Int64() - if err != nil { - return nil - } - return a.Select(int(pos)) - case *Object: - return a.Select(x) - case *Set: - tpe := TypeOf(x) - if Compare(a.of, tpe) == 0 { - return a.of - } - if x, ok := a.of.(Any); ok { - if x.Contains(tpe) { - return tpe - } - } - return nil - case Any: - if Compare(a, A) == 0 { - return A - } - var tpe Type - for i := range a { - // TODO(tsandall): test nil/nil - tpe = Or(Select(a[i], x), tpe) - } - return tpe - default: - return nil - } + return v1.Select(a, x) } // Keys returns the type of keys that can be enumerated for a. For arrays, the // keys are always number types, for objects the keys are always string types, // and for sets the keys are always the type of the set element. func Keys(a Type) Type { - switch a := unwrap(a).(type) { - case *Array: - return N - case *Object: - var tpe Type - for _, k := range a.Keys() { - tpe = Or(tpe, TypeOf(k)) - } - if a.dynamic != nil { - tpe = Or(tpe, a.dynamic.Key) - } - return tpe - case *Set: - return a.of - case Any: - // TODO(tsandall): ditto test - if Compare(a, A) == 0 { - return A - } - var tpe Type - for i := range a { - tpe = Or(Keys(a[i]), tpe) - } - return tpe - } - return nil + return v1.Keys(a) } // Values returns the type of values that can be enumerated for a. func Values(a Type) Type { - switch a := unwrap(a).(type) { - case *Array: - var tpe Type - for i := range a.static { - tpe = Or(tpe, a.static[i]) - } - return Or(tpe, a.dynamic) - case *Object: - var tpe Type - for i := range a.static { - tpe = Or(tpe, a.static[i].Value) - } - if a.dynamic != nil { - tpe = Or(tpe, a.dynamic.Value) - } - return tpe - case *Set: - return a.of - case Any: - if Compare(a, A) == 0 { - return A - } - var tpe Type - for i := range a { - tpe = Or(Values(a[i]), tpe) - } - return tpe - } - return nil + return v1.Values(a) } // Nil returns true if a's type is unknown. func Nil(a Type) bool { - switch a := unwrap(a).(type) { - case nil: - return true - case *Function: - for i := range a.args { - if Nil(a.args[i]) { - return true - } - } - return Nil(a.result) - case *Array: - for i := range a.static { - if Nil(a.static[i]) { - return true - } - } - if a.dynamic != nil { - return Nil(a.dynamic) - } - case *Object: - for i := range a.static { - if Nil(a.static[i].Value) { - return true - } - } - if a.dynamic != nil { - return Nil(a.dynamic.Key) || Nil(a.dynamic.Value) - } - case *Set: - return Nil(a.of) - } - return false + return v1.Nil(a) } // TypeOf returns the type of the Golang native value. 
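A sketch of the set-algebra helpers above:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/types"
)

func main() {
	u := types.Or(types.S, types.N) // the union of two distinct types is an Any

	obj := types.NewObject([]*types.StaticProperty{
		types.NewStaticProperty("name", types.S),
	}, nil)

	fmt.Println(types.Sprint(u))                 // any<number, string>
	fmt.Println(types.Sprint(types.Keys(obj)))   // string
	fmt.Println(types.Sprint(types.Values(obj))) // string
}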
func TypeOf(x interface{}) Type { - switch x := x.(type) { - case nil: - return NewNull() - case bool: - return B - case string: - return S - case json.Number: - return N - case map[string]interface{}: - // The ast.ValueToInterface() function returns ast.Object values as map[string]interface{} - // so map[string]interface{} must be handled here because the type checker uses the value - // to interface conversion when inferring object types. - static := make([]*StaticProperty, 0, len(x)) - for k, v := range x { - static = append(static, NewStaticProperty(k, TypeOf(v))) - } - return NewObject(static, nil) - case map[interface{}]interface{}: - static := make([]*StaticProperty, 0, len(x)) - for k, v := range x { - static = append(static, NewStaticProperty(k, TypeOf(v))) - } - return NewObject(static, nil) - case []interface{}: - static := make([]Type, len(x)) - for i := range x { - static[i] = TypeOf(x[i]) - } - return NewArray(static, nil) - } - panic("unreachable") -} - -type typeSlice []Type - -func (s typeSlice) Less(i, j int) bool { return Compare(s[i], s[j]) < 0 } -func (s typeSlice) Swap(i, j int) { x := s[i]; s[i] = s[j]; s[j] = x } -func (s typeSlice) Len() int { return len(s) } - -func typeSliceCompare(a, b []Type) int { - minLen := len(a) - if len(b) < minLen { - minLen = len(b) - } - for i := 0; i < minLen; i++ { - if cmp := Compare(a[i], b[i]); cmp != 0 { - return cmp - } - } - if len(a) < len(b) { - return -1 - } else if len(b) < len(a) { - return 1 - } - return 0 -} - -func typeOrder(x Type) int { - switch unwrap(x).(type) { - case Null: - return 0 - case Boolean: - return 1 - case Number: - return 2 - case String: - return 3 - case *Array: - return 4 - case *Object: - return 5 - case *Set: - return 6 - case Any: - return 7 - case *Function: - return 8 - case nil: - return -1 - } - panic("unreachable") + return v1.TypeOf(x) } diff --git a/vendor/github.com/open-policy-agent/opa/util/backoff.go b/vendor/github.com/open-policy-agent/opa/util/backoff.go deleted file mode 100644 index 6fbf63ef773..00000000000 --- a/vendor/github.com/open-policy-agent/opa/util/backoff.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2018 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package util - -import ( - "math/rand" - "time" -) - -func init() { - // NOTE(sr): We don't need good random numbers here; it's used for jittering - // the backup timing a bit. But anyways, let's make it random enough; without - // a call to rand.Seed() we'd get the same stream of numbers for each program - // run. (Or not, if some other packages happens to seed the global randomness - // source.) - // Note(philipc): rand.Seed() was deprecated in Go 1.20, so we've switched to - // using the recommended rand.New(rand.NewSource(seed)) style. - rand.New(rand.NewSource(time.Now().UnixNano())) -} - -// DefaultBackoff returns a delay with an exponential backoff based on the -// number of retries. -func DefaultBackoff(base, max float64, retries int) time.Duration { - return Backoff(base, max, .2, 1.6, retries) -} - -// Backoff returns a delay with an exponential backoff based on the number of -// retries. Same algorithm used in gRPC. 
-func Backoff(base, max, jitter, factor float64, retries int) time.Duration { - if retries == 0 { - return 0 - } - - backoff, max := base, max - for backoff < max && retries > 0 { - backoff *= factor - retries-- - } - if backoff > max { - backoff = max - } - - // Randomize backoff delays so that if a cluster of requests start at - // the same time, they won't operate in lockstep. - backoff *= 1 + jitter*(rand.Float64()*2-1) - if backoff < 0 { - return 0 - } - - return time.Duration(backoff) -} diff --git a/vendor/github.com/open-policy-agent/opa/util/hashmap.go b/vendor/github.com/open-policy-agent/opa/util/hashmap.go deleted file mode 100644 index 8875a6323e8..00000000000 --- a/vendor/github.com/open-policy-agent/opa/util/hashmap.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2016 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package util - -import ( - "fmt" - "strings" -) - -// T is a concise way to refer to T. -type T interface{} - -type hashEntry struct { - k T - v T - next *hashEntry -} - -// HashMap represents a key/value map. -type HashMap struct { - eq func(T, T) bool - hash func(T) int - table map[int]*hashEntry - size int -} - -// NewHashMap returns a new empty HashMap. -func NewHashMap(eq func(T, T) bool, hash func(T) int) *HashMap { - return &HashMap{ - eq: eq, - hash: hash, - table: make(map[int]*hashEntry), - size: 0, - } -} - -// Copy returns a shallow copy of this HashMap. -func (h *HashMap) Copy() *HashMap { - cpy := NewHashMap(h.eq, h.hash) - h.Iter(func(k, v T) bool { - cpy.Put(k, v) - return false - }) - return cpy -} - -// Equal returns true if this HashMap equals the other HashMap. -// Two hash maps are equal if they contain the same key/value pairs. -func (h *HashMap) Equal(other *HashMap) bool { - if h.Len() != other.Len() { - return false - } - return !h.Iter(func(k, v T) bool { - ov, ok := other.Get(k) - if !ok { - return true - } - return !h.eq(v, ov) - }) -} - -// Get returns the value for k. -func (h *HashMap) Get(k T) (T, bool) { - hash := h.hash(k) - for entry := h.table[hash]; entry != nil; entry = entry.next { - if h.eq(entry.k, k) { - return entry.v, true - } - } - return nil, false -} - -// Delete removes the key k. -func (h *HashMap) Delete(k T) { - hash := h.hash(k) - var prev *hashEntry - for entry := h.table[hash]; entry != nil; entry = entry.next { - if h.eq(entry.k, k) { - if prev != nil { - prev.next = entry.next - } else { - h.table[hash] = entry.next - } - h.size-- - return - } - prev = entry - } -} - -// Hash returns the hash code for this hash map. -func (h *HashMap) Hash() int { - var hash int - h.Iter(func(k, v T) bool { - hash += h.hash(k) + h.hash(v) - return false - }) - return hash -} - -// Iter invokes the iter function for each element in the HashMap. -// If the iter function returns true, iteration stops and the return value is true. -// If the iter function never returns true, iteration proceeds through all elements -// and the return value is false. -func (h *HashMap) Iter(iter func(T, T) bool) bool { - for _, entry := range h.table { - for ; entry != nil; entry = entry.next { - if iter(entry.k, entry.v) { - return true - } - } - } - return false -} - -// Len returns the current size of this HashMap. -func (h *HashMap) Len() int { - return h.size -} - -// Put inserts a key/value pair into this HashMap. If the key is already present, the existing -// value is overwritten. 
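backoff.go above is removed from the v0 util package without a forwarding replacement. The deleted Backoff implemented gRPC-style exponential backoff with jitter; callers migrating off it can inline the same algorithm, as in this standalone sketch (illustrative names, not an OPA API):

package main

import (
	"math/rand"
	"time"
)

// backoff mirrors the removed util.Backoff: multiply by factor per retry,
// cap at max, then apply jitter so retrying clients do not move in lockstep.
func backoff(base, max, jitter, factor float64, retries int) time.Duration {
	if retries == 0 {
		return 0
	}
	d := base
	for d < max && retries > 0 {
		d *= factor
		retries--
	}
	if d > max {
		d = max
	}
	d *= 1 + jitter*(rand.Float64()*2-1)
	if d < 0 {
		return 0
	}
	return time.Duration(d)
}

func main() {
	// Third retry: 100ms * 1.6^3 is roughly 410ms, +/- 20% jitter.
	_ = backoff(float64(100*time.Millisecond), float64(60*time.Second), 0.2, 1.6, 3)
}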
-func (h *HashMap) Put(k T, v T) { - hash := h.hash(k) - head := h.table[hash] - for entry := head; entry != nil; entry = entry.next { - if h.eq(entry.k, k) { - entry.v = v - return - } - } - h.table[hash] = &hashEntry{k: k, v: v, next: head} - h.size++ -} - -func (h *HashMap) String() string { - var buf []string - h.Iter(func(k T, v T) bool { - buf = append(buf, fmt.Sprintf("%v: %v", k, v)) - return false - }) - return "{" + strings.Join(buf, ", ") + "}" -} - -// Update returns a new HashMap with elements from the other HashMap put into this HashMap. -// If the other HashMap contains elements with the same key as this HashMap, the value -// from the other HashMap overwrites the value from this HashMap. -func (h *HashMap) Update(other *HashMap) *HashMap { - updated := h.Copy() - other.Iter(func(k, v T) bool { - updated.Put(k, v) - return false - }) - return updated -} diff --git a/vendor/github.com/open-policy-agent/opa/util/maps.go b/vendor/github.com/open-policy-agent/opa/util/maps.go deleted file mode 100644 index d943b4d0a89..00000000000 --- a/vendor/github.com/open-policy-agent/opa/util/maps.go +++ /dev/null @@ -1,10 +0,0 @@ -package util - -// Values returns a slice of values from any map. Copied from golang.org/x/exp/maps. -func Values[M ~map[K]V, K comparable, V any](m M) []V { - r := make([]V, 0, len(m)) - for _, v := range m { - r = append(r, v) - } - return r -} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/annotations.go b/vendor/github.com/open-policy-agent/opa/v1/ast/annotations.go new file mode 100644 index 00000000000..def7604edfd --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/annotations.go @@ -0,0 +1,1008 @@ +// Copyright 2022 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "encoding/json" + "fmt" + "net/url" + "slices" + "strings" + + "github.com/open-policy-agent/opa/internal/deepcopy" + astJSON "github.com/open-policy-agent/opa/v1/ast/json" + "github.com/open-policy-agent/opa/v1/util" +) + +const ( + annotationScopePackage = "package" + annotationScopeRule = "rule" + annotationScopeDocument = "document" + annotationScopeSubpackages = "subpackages" +) + +var ( + scopeTerm = StringTerm("scope") + titleTerm = StringTerm("title") + entrypointTerm = StringTerm("entrypoint") + descriptionTerm = StringTerm("description") + organizationsTerm = StringTerm("organizations") + authorsTerm = StringTerm("authors") + relatedResourcesTerm = StringTerm("related_resources") + schemasTerm = StringTerm("schemas") + customTerm = StringTerm("custom") + refTerm = StringTerm("ref") + nameTerm = StringTerm("name") + emailTerm = StringTerm("email") + schemaTerm = StringTerm("schema") + definitionTerm = StringTerm("definition") + documentTerm = StringTerm(annotationScopeDocument) + packageTerm = StringTerm(annotationScopePackage) + ruleTerm = StringTerm(annotationScopeRule) + subpackagesTerm = StringTerm(annotationScopeSubpackages) +) + +type ( + // Annotations represents metadata attached to other AST nodes such as rules. 
+ Annotations struct { + Scope string `json:"scope"` + Title string `json:"title,omitempty"` + Entrypoint bool `json:"entrypoint,omitempty"` + Description string `json:"description,omitempty"` + Organizations []string `json:"organizations,omitempty"` + RelatedResources []*RelatedResourceAnnotation `json:"related_resources,omitempty"` + Authors []*AuthorAnnotation `json:"authors,omitempty"` + Schemas []*SchemaAnnotation `json:"schemas,omitempty"` + Custom map[string]interface{} `json:"custom,omitempty"` + Location *Location `json:"location,omitempty"` + + comments []*Comment + node Node + } + + // SchemaAnnotation contains a schema declaration for the document identified by the path. + SchemaAnnotation struct { + Path Ref `json:"path"` + Schema Ref `json:"schema,omitempty"` + Definition *interface{} `json:"definition,omitempty"` + } + + AuthorAnnotation struct { + Name string `json:"name"` + Email string `json:"email,omitempty"` + } + + RelatedResourceAnnotation struct { + Ref url.URL `json:"ref"` + Description string `json:"description,omitempty"` + } + + AnnotationSet struct { + byRule map[*Rule][]*Annotations + byPackage map[int]*Annotations + byPath *annotationTreeNode + modules []*Module // Modules this set was constructed from + } + + annotationTreeNode struct { + Value *Annotations + Children map[Value]*annotationTreeNode // we assume key elements are hashable (vars and strings only!) + } + + AnnotationsRef struct { + Path Ref `json:"path"` // The path of the node the annotations are applied to + Annotations *Annotations `json:"annotations,omitempty"` + Location *Location `json:"location,omitempty"` // The location of the node the annotations are applied to + + node Node // The node the annotations are applied to + } + + AnnotationsRefSet []*AnnotationsRef + + FlatAnnotationsRefSet AnnotationsRefSet +) + +func (a *Annotations) String() string { + bs, _ := a.MarshalJSON() + return string(bs) +} + +// Loc returns the location of this annotation. +func (a *Annotations) Loc() *Location { + return a.Location +} + +// SetLoc updates the location of this annotation. +func (a *Annotations) SetLoc(l *Location) { + a.Location = l +} + +// EndLoc returns the location of this annotation's last comment line. +func (a *Annotations) EndLoc() *Location { + count := len(a.comments) + if count == 0 { + return a.Location + } + return a.comments[count-1].Location +} + +// Compare returns an integer indicating if a is less than, equal to, or greater +// than other. 
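// (Illustrative note, not part of the upstream patch: the ordering below is
// lexicographic over scope, title, description, organizations, related
// resources, authors, schemas, entrypoint, and custom, in that sequence, so
// e.g. (&Annotations{Title: "a"}).Compare(&Annotations{Title: "b"}) < 0.)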
+func (a *Annotations) Compare(other *Annotations) int { + + if a == nil && other == nil { + return 0 + } + + if a == nil { + return -1 + } + + if other == nil { + return 1 + } + + if cmp := scopeCompare(a.Scope, other.Scope); cmp != 0 { + return cmp + } + + if cmp := strings.Compare(a.Title, other.Title); cmp != 0 { + return cmp + } + + if cmp := strings.Compare(a.Description, other.Description); cmp != 0 { + return cmp + } + + if cmp := compareStringLists(a.Organizations, other.Organizations); cmp != 0 { + return cmp + } + + if cmp := compareRelatedResources(a.RelatedResources, other.RelatedResources); cmp != 0 { + return cmp + } + + if cmp := compareAuthors(a.Authors, other.Authors); cmp != 0 { + return cmp + } + + if cmp := compareSchemas(a.Schemas, other.Schemas); cmp != 0 { + return cmp + } + + if a.Entrypoint != other.Entrypoint { + if a.Entrypoint { + return 1 + } + return -1 + } + + if cmp := util.Compare(a.Custom, other.Custom); cmp != 0 { + return cmp + } + + return 0 +} + +// GetTargetPath returns the path of the node these Annotations are applied to (the target) +func (a *Annotations) GetTargetPath() Ref { + switch n := a.node.(type) { + case *Package: + return n.Path + case *Rule: + return n.Ref().GroundPrefix() + default: + return nil + } +} + +func (a *Annotations) MarshalJSON() ([]byte, error) { + if a == nil { + return []byte(`{"scope":""}`), nil + } + + data := map[string]interface{}{ + "scope": a.Scope, + } + + if a.Title != "" { + data["title"] = a.Title + } + + if a.Description != "" { + data["description"] = a.Description + } + + if a.Entrypoint { + data["entrypoint"] = a.Entrypoint + } + + if len(a.Organizations) > 0 { + data["organizations"] = a.Organizations + } + + if len(a.RelatedResources) > 0 { + data["related_resources"] = a.RelatedResources + } + + if len(a.Authors) > 0 { + data["authors"] = a.Authors + } + + if len(a.Schemas) > 0 { + data["schemas"] = a.Schemas + } + + if len(a.Custom) > 0 { + data["custom"] = a.Custom + } + + if astJSON.GetOptions().MarshalOptions.IncludeLocation.Annotations { + if a.Location != nil { + data["location"] = a.Location + } + } + + return json.Marshal(data) +} + +func NewAnnotationsRef(a *Annotations) *AnnotationsRef { + var loc *Location + if a.node != nil { + loc = a.node.Loc() + } + + return &AnnotationsRef{ + Location: loc, + Path: a.GetTargetPath(), + Annotations: a, + node: a.node, + } +} + +func (ar *AnnotationsRef) GetPackage() *Package { + switch n := ar.node.(type) { + case *Package: + return n + case *Rule: + return n.Module.Package + default: + return nil + } +} + +func (ar *AnnotationsRef) GetRule() *Rule { + switch n := ar.node.(type) { + case *Rule: + return n + default: + return nil + } +} + +func (ar *AnnotationsRef) MarshalJSON() ([]byte, error) { + data := map[string]interface{}{ + "path": ar.Path, + } + + if ar.Annotations != nil { + data["annotations"] = ar.Annotations + } + + if astJSON.GetOptions().MarshalOptions.IncludeLocation.AnnotationsRef { + if ar.Location != nil { + data["location"] = ar.Location + } + + // The location set for the schema ref terms is wrong (always set to + // row 1) and not really useful anyway.. 
so strip it out before marshalling + for _, schema := range ar.Annotations.Schemas { + if schema.Path != nil { + for _, term := range schema.Path { + term.Location = nil + } + } + } + } + + return json.Marshal(data) +} + +func scopeCompare(s1, s2 string) int { + o1 := scopeOrder(s1) + o2 := scopeOrder(s2) + + if o2 < o1 { + return 1 + } else if o2 > o1 { + return -1 + } + + if s1 < s2 { + return -1 + } else if s2 < s1 { + return 1 + } + + return 0 +} + +func scopeOrder(s string) int { + if s == annotationScopeRule { + return 1 + } + return 0 +} + +func compareAuthors(a, b []*AuthorAnnotation) int { + if len(a) > len(b) { + return 1 + } else if len(a) < len(b) { + return -1 + } + + for i := range a { + if cmp := a[i].Compare(b[i]); cmp != 0 { + return cmp + } + } + + return 0 +} + +func compareRelatedResources(a, b []*RelatedResourceAnnotation) int { + if len(a) > len(b) { + return 1 + } else if len(a) < len(b) { + return -1 + } + + for i := range a { + if cmp := a[i].Compare(b[i]); cmp != 0 { + return cmp + } + } + + return 0 +} + +func compareSchemas(a, b []*SchemaAnnotation) int { + maxLen := len(a) + if len(b) < maxLen { + maxLen = len(b) + } + + for i := range maxLen { + if cmp := a[i].Compare(b[i]); cmp != 0 { + return cmp + } + } + + if len(a) > len(b) { + return 1 + } else if len(a) < len(b) { + return -1 + } + + return 0 +} + +func compareStringLists(a, b []string) int { + if len(a) > len(b) { + return 1 + } else if len(a) < len(b) { + return -1 + } + + for i := range a { + if cmp := strings.Compare(a[i], b[i]); cmp != 0 { + return cmp + } + } + + return 0 +} + +// Copy returns a deep copy of s. +func (a *Annotations) Copy(node Node) *Annotations { + cpy := *a + + cpy.Organizations = make([]string, len(a.Organizations)) + copy(cpy.Organizations, a.Organizations) + + cpy.RelatedResources = make([]*RelatedResourceAnnotation, len(a.RelatedResources)) + for i := range a.RelatedResources { + cpy.RelatedResources[i] = a.RelatedResources[i].Copy() + } + + cpy.Authors = make([]*AuthorAnnotation, len(a.Authors)) + for i := range a.Authors { + cpy.Authors[i] = a.Authors[i].Copy() + } + + cpy.Schemas = make([]*SchemaAnnotation, len(a.Schemas)) + for i := range a.Schemas { + cpy.Schemas[i] = a.Schemas[i].Copy() + } + + if a.Custom != nil { + cpy.Custom = deepcopy.Map(a.Custom) + } + + cpy.node = node + + return &cpy +} + +// toObject constructs an AST Object from the annotation. 
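// (Illustrative sketch, not part of the upstream patch: for
// &Annotations{Scope: "rule", Title: "Allow admins"}, the object built below
// is equivalent to MustParseTerm(`{"scope": "rule", "title": "Allow admins"}`).Value;
// this is roughly the shape surfaced to policies by the rego.metadata.rule()
// and rego.metadata.chain() builtins.)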
+func (a *Annotations) toObject() (*Object, *Error) { + obj := NewObject() + + if a == nil { + return &obj, nil + } + + if len(a.Scope) > 0 { + switch a.Scope { + case annotationScopeDocument: + obj.Insert(scopeTerm, documentTerm) + case annotationScopePackage: + obj.Insert(scopeTerm, packageTerm) + case annotationScopeRule: + obj.Insert(scopeTerm, ruleTerm) + case annotationScopeSubpackages: + obj.Insert(scopeTerm, subpackagesTerm) + default: + obj.Insert(scopeTerm, StringTerm(a.Scope)) + } + } + + if len(a.Title) > 0 { + obj.Insert(titleTerm, StringTerm(a.Title)) + } + + if a.Entrypoint { + obj.Insert(entrypointTerm, InternedBooleanTerm(true)) + } + + if len(a.Description) > 0 { + obj.Insert(descriptionTerm, StringTerm(a.Description)) + } + + if len(a.Organizations) > 0 { + orgs := make([]*Term, 0, len(a.Organizations)) + for _, org := range a.Organizations { + orgs = append(orgs, StringTerm(org)) + } + obj.Insert(organizationsTerm, ArrayTerm(orgs...)) + } + + if len(a.RelatedResources) > 0 { + rrs := make([]*Term, 0, len(a.RelatedResources)) + for _, rr := range a.RelatedResources { + rrObj := NewObject(Item(refTerm, StringTerm(rr.Ref.String()))) + if len(rr.Description) > 0 { + rrObj.Insert(descriptionTerm, StringTerm(rr.Description)) + } + rrs = append(rrs, NewTerm(rrObj)) + } + obj.Insert(relatedResourcesTerm, ArrayTerm(rrs...)) + } + + if len(a.Authors) > 0 { + as := make([]*Term, 0, len(a.Authors)) + for _, author := range a.Authors { + aObj := NewObject() + if len(author.Name) > 0 { + aObj.Insert(nameTerm, StringTerm(author.Name)) + } + if len(author.Email) > 0 { + aObj.Insert(emailTerm, StringTerm(author.Email)) + } + as = append(as, NewTerm(aObj)) + } + obj.Insert(authorsTerm, ArrayTerm(as...)) + } + + if len(a.Schemas) > 0 { + ss := make([]*Term, 0, len(a.Schemas)) + for _, s := range a.Schemas { + sObj := NewObject() + if len(s.Path) > 0 { + sObj.Insert(pathTerm, NewTerm(s.Path.toArray())) + } + if len(s.Schema) > 0 { + sObj.Insert(schemaTerm, NewTerm(s.Schema.toArray())) + } + if s.Definition != nil { + def, err := InterfaceToValue(s.Definition) + if err != nil { + return nil, NewError(CompileErr, a.Location, "invalid definition in schema annotation: %s", err.Error()) + } + sObj.Insert(definitionTerm, NewTerm(def)) + } + ss = append(ss, NewTerm(sObj)) + } + obj.Insert(schemasTerm, ArrayTerm(ss...)) + } + + if len(a.Custom) > 0 { + c, err := InterfaceToValue(a.Custom) + if err != nil { + return nil, NewError(CompileErr, a.Location, "invalid custom annotation %s", err.Error()) + } + obj.Insert(customTerm, NewTerm(c)) + } + + return &obj, nil +} + +func attachRuleAnnotations(mod *Module) { + // make a copy of the annotations + cpy := make([]*Annotations, len(mod.Annotations)) + for i, a := range mod.Annotations { + cpy[i] = a.Copy(a.node) + } + + for _, rule := range mod.Rules { + var j int + var found bool + for i, a := range cpy { + if rule.Ref().GroundPrefix().Equal(a.GetTargetPath()) { + if a.Scope == annotationScopeDocument { + rule.Annotations = append(rule.Annotations, a) + } else if a.Scope == annotationScopeRule && rule.Loc().Row > a.Location.Row { + j = i + found = true + rule.Annotations = append(rule.Annotations, a) + } + } + } + + if found && j < len(cpy) { + cpy = append(cpy[:j], cpy[j+1:]...) + } + } +} + +func attachAnnotationsNodes(mod *Module) Errors { + var errs Errors + + // Find first non-annotation statement following each annotation and attach + // the annotation to that statement. 
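	// (Illustrative note, not part of the upstream patch: given a METADATA
	// comment block starting at row 1 followed by "package example" at row 4,
	// the package statement is the first non-annotation statement with a
	// larger row, so a.node becomes the *Package and an empty scope later
	// defaults to "package".)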
+ for _, a := range mod.Annotations { + for _, stmt := range mod.stmts { + _, ok := stmt.(*Annotations) + if !ok { + if stmt.Loc().Row > a.Location.Row { + a.node = stmt + break + } + } + } + + if a.Scope == "" { + switch a.node.(type) { + case *Rule: + if a.Entrypoint { + a.Scope = annotationScopeDocument + } else { + a.Scope = annotationScopeRule + } + case *Package: + a.Scope = annotationScopePackage + case *Import: + // Note that this isn't a valid scope, but set here so that the + // validate function called below can print an error message with + // a context that makes sense ("invalid scope: 'import'" instead of + // "invalid scope: '') + a.Scope = "import" + } + } + + if err := validateAnnotationScopeAttachment(a); err != nil { + errs = append(errs, err) + } + + if err := validateAnnotationEntrypointAttachment(a); err != nil { + errs = append(errs, err) + } + } + + return errs +} + +func validateAnnotationScopeAttachment(a *Annotations) *Error { + + switch a.Scope { + case annotationScopeRule, annotationScopeDocument: + if _, ok := a.node.(*Rule); ok { + return nil + } + return newScopeAttachmentErr(a, "rule") + case annotationScopePackage, annotationScopeSubpackages: + if _, ok := a.node.(*Package); ok { + return nil + } + return newScopeAttachmentErr(a, "package") + } + + return NewError(ParseErr, a.Loc(), "invalid annotation scope '%v'. Use one of '%s', '%s', '%s', or '%s'", + a.Scope, annotationScopeRule, annotationScopeDocument, annotationScopePackage, annotationScopeSubpackages) +} + +func validateAnnotationEntrypointAttachment(a *Annotations) *Error { + if a.Entrypoint && !(a.Scope == annotationScopeDocument || a.Scope == annotationScopePackage) { + return NewError( + ParseErr, a.Loc(), "annotation entrypoint applied to non-document or package scope '%v'", a.Scope) + } + return nil +} + +// Copy returns a deep copy of a. +func (a *AuthorAnnotation) Copy() *AuthorAnnotation { + cpy := *a + return &cpy +} + +// Compare returns an integer indicating if s is less than, equal to, or greater +// than other. +func (a *AuthorAnnotation) Compare(other *AuthorAnnotation) int { + if cmp := strings.Compare(a.Name, other.Name); cmp != 0 { + return cmp + } + + if cmp := strings.Compare(a.Email, other.Email); cmp != 0 { + return cmp + } + + return 0 +} + +func (a *AuthorAnnotation) String() string { + if len(a.Email) == 0 { + return a.Name + } else if len(a.Name) == 0 { + return fmt.Sprintf("<%s>", a.Email) + } + return fmt.Sprintf("%s <%s>", a.Name, a.Email) +} + +// Copy returns a deep copy of rr. +func (rr *RelatedResourceAnnotation) Copy() *RelatedResourceAnnotation { + cpy := *rr + return &cpy +} + +// Compare returns an integer indicating if s is less than, equal to, or greater +// than other. +func (rr *RelatedResourceAnnotation) Compare(other *RelatedResourceAnnotation) int { + if cmp := strings.Compare(rr.Description, other.Description); cmp != 0 { + return cmp + } + + if cmp := strings.Compare(rr.Ref.String(), other.Ref.String()); cmp != 0 { + return cmp + } + + return 0 +} + +func (rr *RelatedResourceAnnotation) String() string { + bs, _ := json.Marshal(rr) + return string(bs) +} + +func (rr *RelatedResourceAnnotation) MarshalJSON() ([]byte, error) { + d := map[string]interface{}{ + "ref": rr.Ref.String(), + } + + if len(rr.Description) > 0 { + d["description"] = rr.Description + } + + return json.Marshal(d) +} + +// Copy returns a deep copy of s. 
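// (Illustrative examples, not part of the upstream patch, of the String()
// renderings defined earlier in this file:
//
//	(&AuthorAnnotation{Name: "Jane", Email: "jane@example.com"}).String() // "Jane <jane@example.com>"
//	(&AuthorAnnotation{Email: "jane@example.com"}).String()               // "<jane@example.com>"
//	(&AuthorAnnotation{Name: "Jane"}).String()                            // "Jane"
// )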
+func (s *SchemaAnnotation) Copy() *SchemaAnnotation { + cpy := *s + return &cpy +} + +// Compare returns an integer indicating if s is less than, equal to, or greater +// than other. +func (s *SchemaAnnotation) Compare(other *SchemaAnnotation) int { + if cmp := s.Path.Compare(other.Path); cmp != 0 { + return cmp + } + + if cmp := s.Schema.Compare(other.Schema); cmp != 0 { + return cmp + } + + if s.Definition != nil && other.Definition == nil { + return -1 + } else if s.Definition == nil && other.Definition != nil { + return 1 + } else if s.Definition != nil && other.Definition != nil { + return util.Compare(*s.Definition, *other.Definition) + } + + return 0 +} + +func (s *SchemaAnnotation) String() string { + bs, _ := json.Marshal(s) + return string(bs) +} + +func newAnnotationSet() *AnnotationSet { + return &AnnotationSet{ + byRule: map[*Rule][]*Annotations{}, + byPackage: map[int]*Annotations{}, + byPath: newAnnotationTree(), + } +} + +func BuildAnnotationSet(modules []*Module) (*AnnotationSet, Errors) { + as := newAnnotationSet() + var errs Errors + for _, m := range modules { + for _, a := range m.Annotations { + if err := as.add(a); err != nil { + errs = append(errs, err) + } + } + } + if len(errs) > 0 { + return nil, errs + } + as.modules = modules + return as, nil +} + +// NOTE(philipc): During copy propagation, the underlying Nodes can be +// stripped away from the annotations, leading to nil deref panics. We +// silently ignore these cases for now, as a workaround. +func (as *AnnotationSet) add(a *Annotations) *Error { + switch a.Scope { + case annotationScopeRule: + if rule, ok := a.node.(*Rule); ok { + as.byRule[rule] = append(as.byRule[rule], a) + } + case annotationScopePackage: + if pkg, ok := a.node.(*Package); ok { + hash := pkg.Path.Hash() + if exist, ok := as.byPackage[hash]; ok { + return errAnnotationRedeclared(a, exist.Location) + } + as.byPackage[hash] = a + } + case annotationScopeDocument: + if rule, ok := a.node.(*Rule); ok { + path := rule.Ref().GroundPrefix() + x := as.byPath.get(path) + if x != nil { + return errAnnotationRedeclared(a, x.Value.Location) + } + as.byPath.insert(path, a) + } + case annotationScopeSubpackages: + if pkg, ok := a.node.(*Package); ok { + x := as.byPath.get(pkg.Path) + if x != nil && x.Value != nil { + return errAnnotationRedeclared(a, x.Value.Location) + } + as.byPath.insert(pkg.Path, a) + } + } + return nil +} + +func (as *AnnotationSet) GetRuleScope(r *Rule) []*Annotations { + if as == nil { + return nil + } + return as.byRule[r] +} + +func (as *AnnotationSet) GetSubpackagesScope(path Ref) []*Annotations { + if as == nil { + return nil + } + return as.byPath.ancestors(path) +} + +func (as *AnnotationSet) GetDocumentScope(path Ref) *Annotations { + if as == nil { + return nil + } + if node := as.byPath.get(path); node != nil { + return node.Value + } + return nil +} + +func (as *AnnotationSet) GetPackageScope(pkg *Package) *Annotations { + if as == nil { + return nil + } + return as.byPackage[pkg.Path.Hash()] +} + +// Flatten returns a flattened list view of this AnnotationSet. +// The returned slice is sorted, first by the annotations' target path, then by their target location +func (as *AnnotationSet) Flatten() FlatAnnotationsRefSet { + // This preallocation often won't be optimal, but it's superior to starting with a nil slice. 
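	// (Illustrative note, not part of the upstream patch: the capacity is a
	// lower bound, since byPath children can nest and each byRule bucket may
	// hold several annotations, so append can still reallocate; the estimate
	// merely avoids growing from an empty slice.)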
+	refs := make([]*AnnotationsRef, 0, len(as.byPath.Children)+len(as.byRule)+len(as.byPackage))
+
+	refs = as.byPath.flatten(refs)
+
+	for _, a := range as.byPackage {
+		refs = append(refs, NewAnnotationsRef(a))
+	}
+
+	for _, as := range as.byRule {
+		for _, a := range as {
+			refs = append(refs, NewAnnotationsRef(a))
+		}
+	}
+
+	// Sort by path, then annotation location, for stable output
+	slices.SortStableFunc(refs, (*AnnotationsRef).Compare)
+
+	return refs
+}
+
+// Chain returns the chain of annotations leading up to the given rule.
+// The returned slice is ordered as follows:
+// 0. Entries for the given rule, ordered from the METADATA block declared immediately above the rule, to the block declared farthest away (always at least one entry)
+// 1. The 'document' scope entry, if any
+// 2. The 'package' scope entry, if any
+// 3. Entries for the 'subpackages' scope, if any; ordered from the closest package path to the farthest. E.g.: 'do.re.mi', 'do.re', 'do'
+// The returned slice is guaranteed to always contain at least one entry, corresponding to the given rule.
+func (as *AnnotationSet) Chain(rule *Rule) AnnotationsRefSet {
+	var refs []*AnnotationsRef
+
+	ruleAnnots := as.GetRuleScope(rule)
+
+	if len(ruleAnnots) >= 1 {
+		for _, a := range ruleAnnots {
+			refs = append(refs, NewAnnotationsRef(a))
+		}
+	} else {
+		// Make sure there is always a leading entry representing the passed rule, even if it has no annotations
+		refs = append(refs, &AnnotationsRef{
+			Location: rule.Location,
+			Path:     rule.Ref().GroundPrefix(),
+			node:     rule,
+		})
+	}
+
+	if len(refs) > 1 {
+		// Sort by annotation location; the chain must start with the annotations declared closest to the rule, then move outward
+		slices.SortStableFunc(refs, func(a, b *AnnotationsRef) int {
+			return -a.Annotations.Location.Compare(b.Annotations.Location)
+		})
+	}
+
+	docAnnots := as.GetDocumentScope(rule.Ref().GroundPrefix())
+	if docAnnots != nil {
+		refs = append(refs, NewAnnotationsRef(docAnnots))
+	}
+
+	pkg := rule.Module.Package
+	pkgAnnots := as.GetPackageScope(pkg)
+	if pkgAnnots != nil {
+		refs = append(refs, NewAnnotationsRef(pkgAnnots))
+	}
+
+	subPkgAnnots := as.GetSubpackagesScope(pkg.Path)
+	// We need to reverse the order, as subPkgAnnots ordering will start at the root,
+	// whereas we want to end at the root.
+	for i := len(subPkgAnnots) - 1; i >= 0; i-- {
+		refs = append(refs, NewAnnotationsRef(subPkgAnnots[i]))
+	}
+
+	return refs
+}
+
+func (ars FlatAnnotationsRefSet) Insert(ar *AnnotationsRef) FlatAnnotationsRefSet {
+	result := make(FlatAnnotationsRefSet, 0, len(ars)+1)
+
+	// insertion sort, first by path, then location
+	for i, current := range ars {
+		if ar.Compare(current) < 0 {
+			result = append(result, ar)
+			result = append(result, ars[i:]...)
+ break + } + result = append(result, current) + } + + if len(result) < len(ars)+1 { + result = append(result, ar) + } + + return result +} + +func newAnnotationTree() *annotationTreeNode { + return &annotationTreeNode{ + Value: nil, + Children: map[Value]*annotationTreeNode{}, + } +} + +func (t *annotationTreeNode) insert(path Ref, value *Annotations) { + node := t + for _, k := range path { + child, ok := node.Children[k.Value] + if !ok { + child = newAnnotationTree() + node.Children[k.Value] = child + } + node = child + } + node.Value = value +} + +func (t *annotationTreeNode) get(path Ref) *annotationTreeNode { + node := t + for _, k := range path { + if node == nil { + return nil + } + child, ok := node.Children[k.Value] + if !ok { + return nil + } + node = child + } + return node +} + +// ancestors returns a slice of annotations in ascending order, starting with the root of ref; e.g.: 'root', 'root.foo', 'root.foo.bar'. +func (t *annotationTreeNode) ancestors(path Ref) (result []*Annotations) { + node := t + for _, k := range path { + if node == nil { + return result + } + child, ok := node.Children[k.Value] + if !ok { + return result + } + if child.Value != nil { + result = append(result, child.Value) + } + node = child + } + return result +} + +func (t *annotationTreeNode) flatten(refs []*AnnotationsRef) []*AnnotationsRef { + if a := t.Value; a != nil { + refs = append(refs, NewAnnotationsRef(a)) + } + for _, c := range t.Children { + refs = c.flatten(refs) + } + return refs +} + +func (ar *AnnotationsRef) Compare(other *AnnotationsRef) int { + if c := ar.Path.Compare(other.Path); c != 0 { + return c + } + + if c := ar.Annotations.Location.Compare(other.Annotations.Location); c != 0 { + return c + } + + return ar.Annotations.Compare(other.Annotations) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/builtins.go b/vendor/github.com/open-policy-agent/opa/v1/ast/builtins.go new file mode 100644 index 00000000000..32ab2d153fd --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/builtins.go @@ -0,0 +1,3407 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "strings" + + "github.com/open-policy-agent/opa/v1/types" +) + +// Builtins is the registry of built-in functions supported by OPA. +// Call RegisterBuiltin to add a new built-in. +var Builtins []*Builtin + +// RegisterBuiltin adds a new built-in function to the registry. +func RegisterBuiltin(b *Builtin) { + Builtins = append(Builtins, b) + BuiltinMap[b.Name] = b + if len(b.Infix) > 0 { + BuiltinMap[b.Infix] = b + } +} + +// DefaultBuiltins is the registry of built-in functions supported in OPA +// by default. When adding a new built-in function to OPA, update this +// list. 
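// (Illustrative sketch, not part of the upstream patch: another package can
// extend this registry with its own declaration; the "hello" builtin below is
// hypothetical, and a matching implementation must still be registered with
// the evaluator separately:
//
//	ast.RegisterBuiltin(&ast.Builtin{
//		Name: "hello",
//		Decl: types.NewFunction(types.Args(types.S), types.S),
//	})
// )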
+var DefaultBuiltins = [...]*Builtin{ + // Unification/equality ("=") + Equality, + + // Assignment (":=") + Assign, + + // Membership, infix "in": `x in xs` + Member, + MemberWithKey, + + // Comparisons + GreaterThan, + GreaterThanEq, + LessThan, + LessThanEq, + NotEqual, + Equal, + + // Arithmetic + Plus, + Minus, + Multiply, + Divide, + Ceil, + Floor, + Round, + Abs, + Rem, + + // Bitwise Arithmetic + BitsOr, + BitsAnd, + BitsNegate, + BitsXOr, + BitsShiftLeft, + BitsShiftRight, + + // Binary + And, + Or, + + // Aggregates + Count, + Sum, + Product, + Max, + Min, + Any, + All, + + // Arrays + ArrayConcat, + ArraySlice, + ArrayReverse, + + // Conversions + ToNumber, + + // Casts (DEPRECATED) + CastObject, + CastNull, + CastBoolean, + CastString, + CastSet, + CastArray, + + // Regular Expressions + RegexIsValid, + RegexMatch, + RegexMatchDeprecated, + RegexSplit, + GlobsMatch, + RegexTemplateMatch, + RegexFind, + RegexFindAllStringSubmatch, + RegexReplace, + + // Sets + SetDiff, + Intersection, + Union, + + // Strings + AnyPrefixMatch, + AnySuffixMatch, + Concat, + FormatInt, + IndexOf, + IndexOfN, + Substring, + Lower, + Upper, + Contains, + StringCount, + StartsWith, + EndsWith, + Split, + Replace, + ReplaceN, + Trim, + TrimLeft, + TrimPrefix, + TrimRight, + TrimSuffix, + TrimSpace, + Sprintf, + StringReverse, + RenderTemplate, + + // Numbers + NumbersRange, + NumbersRangeStep, + RandIntn, + + // Encoding + JSONMarshal, + JSONMarshalWithOptions, + JSONUnmarshal, + JSONIsValid, + Base64Encode, + Base64Decode, + Base64IsValid, + Base64UrlEncode, + Base64UrlEncodeNoPad, + Base64UrlDecode, + URLQueryDecode, + URLQueryEncode, + URLQueryEncodeObject, + URLQueryDecodeObject, + YAMLMarshal, + YAMLUnmarshal, + YAMLIsValid, + HexEncode, + HexDecode, + + // Object Manipulation + ObjectUnion, + ObjectUnionN, + ObjectRemove, + ObjectFilter, + ObjectGet, + ObjectKeys, + ObjectSubset, + + // JSON Object Manipulation + JSONFilter, + JSONRemove, + JSONPatch, + + // Tokens + JWTDecode, + JWTVerifyRS256, + JWTVerifyRS384, + JWTVerifyRS512, + JWTVerifyPS256, + JWTVerifyPS384, + JWTVerifyPS512, + JWTVerifyES256, + JWTVerifyES384, + JWTVerifyES512, + JWTVerifyHS256, + JWTVerifyHS384, + JWTVerifyHS512, + JWTDecodeVerify, + JWTEncodeSignRaw, + JWTEncodeSign, + + // Time + NowNanos, + ParseNanos, + ParseRFC3339Nanos, + ParseDurationNanos, + Format, + Date, + Clock, + Weekday, + AddDate, + Diff, + + // Crypto + CryptoX509ParseCertificates, + CryptoX509ParseAndVerifyCertificates, + CryptoX509ParseAndVerifyCertificatesWithOptions, + CryptoMd5, + CryptoSha1, + CryptoSha256, + CryptoX509ParseCertificateRequest, + CryptoX509ParseRSAPrivateKey, + CryptoX509ParseKeyPair, + CryptoParsePrivateKeys, + CryptoHmacMd5, + CryptoHmacSha1, + CryptoHmacSha256, + CryptoHmacSha512, + CryptoHmacEqual, + + // Graphs + WalkBuiltin, + ReachableBuiltin, + ReachablePathsBuiltin, + + // Sort + Sort, + + // Types + IsNumber, + IsString, + IsBoolean, + IsArray, + IsSet, + IsObject, + IsNull, + TypeNameBuiltin, + + // HTTP + HTTPSend, + + // GraphQL + GraphQLParse, + GraphQLParseAndVerify, + GraphQLParseQuery, + GraphQLParseSchema, + GraphQLIsValid, + GraphQLSchemaIsValid, + + // JSON Schema + JSONSchemaVerify, + JSONMatchSchema, + + // Cloud Provider Helpers + ProvidersAWSSignReqObj, + + // Rego + RegoParseModule, + RegoMetadataChain, + RegoMetadataRule, + + // OPA + OPARuntime, + + // Tracing + Trace, + + // Networking + NetCIDROverlap, + NetCIDRIntersects, + NetCIDRContains, + NetCIDRContainsMatches, + NetCIDRExpand, + NetCIDRMerge, + 
NetLookupIPAddr, + NetCIDRIsValid, + + // Glob + GlobMatch, + GlobQuoteMeta, + + // Units + UnitsParse, + UnitsParseBytes, + + // UUIDs + UUIDRFC4122, + UUIDParse, + + // SemVers + SemVerIsValid, + SemVerCompare, + + // Printing + Print, + InternalPrint, + + // Testing + InternalTestCase, +} + +// BuiltinMap provides a convenient mapping of built-in names to +// built-in definitions. +var BuiltinMap map[string]*Builtin + +// Deprecated: Builtins can now be directly annotated with the +// Nondeterministic property, and when set to true, will be ignored +// for partial evaluation. +var IgnoreDuringPartialEval = []*Builtin{ + RandIntn, + UUIDRFC4122, + JWTDecodeVerify, + JWTEncodeSignRaw, + JWTEncodeSign, + NowNanos, + HTTPSend, + OPARuntime, + NetLookupIPAddr, +} + +/** + * Unification + */ + +// Equality represents the "=" operator. +var Equality = &Builtin{ + Name: "eq", + Infix: "=", + Decl: types.NewFunction( + types.Args(types.A, types.A), + types.B, + ), +} + +/** + * Assignment + */ + +// Assign represents the assignment (":=") operator. +var Assign = &Builtin{ + Name: "assign", + Infix: ":=", + Decl: types.NewFunction( + types.Args(types.A, types.A), + types.B, + ), +} + +// Member represents the `in` (infix) operator. +var Member = &Builtin{ + Name: "internal.member_2", + Infix: "in", + Decl: types.NewFunction( + types.Args( + types.A, + types.A, + ), + types.B, + ), +} + +// MemberWithKey represents the `in` (infix) operator when used +// with two terms on the lhs, i.e., `k, v in obj`. +var MemberWithKey = &Builtin{ + Name: "internal.member_3", + Infix: "in", + Decl: types.NewFunction( + types.Args( + types.A, + types.A, + types.A, + ), + types.B, + ), +} + +/** + * Comparisons + */ +var comparison = category("comparison") + +var GreaterThan = &Builtin{ + Name: "gt", + Infix: ">", + Categories: comparison, + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A), + types.Named("y", types.A), + ), + types.Named("result", types.B).Description("true if `x` is greater than `y`; false otherwise"), + ), +} + +var GreaterThanEq = &Builtin{ + Name: "gte", + Infix: ">=", + Categories: comparison, + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A), + types.Named("y", types.A), + ), + types.Named("result", types.B).Description("true if `x` is greater or equal to `y`; false otherwise"), + ), +} + +// LessThan represents the "<" comparison operator. +var LessThan = &Builtin{ + Name: "lt", + Infix: "<", + Categories: comparison, + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A), + types.Named("y", types.A), + ), + types.Named("result", types.B).Description("true if `x` is less than `y`; false otherwise"), + ), +} + +var LessThanEq = &Builtin{ + Name: "lte", + Infix: "<=", + Categories: comparison, + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A), + types.Named("y", types.A), + ), + types.Named("result", types.B).Description("true if `x` is less than or equal to `y`; false otherwise"), + ), +} + +var NotEqual = &Builtin{ + Name: "neq", + Infix: "!=", + Categories: comparison, + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A), + types.Named("y", types.A), + ), + types.Named("result", types.B).Description("true if `x` is not equal to `y`; false otherwise"), + ), +} + +// Equal represents the "==" comparison operator. 
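// (Illustrative note, not part of the upstream patch: RegisterBuiltin indexes
// both names and infixes, so once the defaults are registered,
// BuiltinMap["equal"] and BuiltinMap["=="] resolve to Equal below, just as
// BuiltinMap["="] resolves to Equality and BuiltinMap[":="] to Assign.)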
+var Equal = &Builtin{ + Name: "equal", + Infix: "==", + Categories: comparison, + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A), + types.Named("y", types.A), + ), + types.Named("result", types.B).Description("true if `x` is equal to `y`; false otherwise"), + ), +} + +/** + * Arithmetic + */ +var number = category("numbers") + +var Plus = &Builtin{ + Name: "plus", + Infix: "+", + Description: "Plus adds two numbers together.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N), + types.Named("y", types.N), + ), + types.Named("z", types.N).Description("the sum of `x` and `y`"), + ), + Categories: number, +} + +var Minus = &Builtin{ + Name: "minus", + Infix: "-", + Description: "Minus subtracts the second number from the first number or computes the difference between two sets.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.NewAny(types.N, types.SetOfAny)), + types.Named("y", types.NewAny(types.N, types.SetOfAny)), + ), + types.Named("z", types.NewAny(types.N, types.SetOfAny)).Description("the difference of `x` and `y`"), + ), + Categories: category("sets", "numbers"), +} + +var Multiply = &Builtin{ + Name: "mul", + Infix: "*", + Description: "Multiplies two numbers.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N), + types.Named("y", types.N), + ), + types.Named("z", types.N).Description("the product of `x` and `y`"), + ), + Categories: number, +} + +var Divide = &Builtin{ + Name: "div", + Infix: "/", + Description: "Divides the first number by the second number.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the dividend"), + types.Named("y", types.N).Description("the divisor"), + ), + types.Named("z", types.N).Description("the result of `x` divided by `y`"), + ), + Categories: number, +} + +var Round = &Builtin{ + Name: "round", + Description: "Rounds the number to the nearest integer.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the number to round"), + ), + types.Named("y", types.N).Description("the result of rounding `x`"), + ), + Categories: number, +} + +var Ceil = &Builtin{ + Name: "ceil", + Description: "Rounds the number _up_ to the nearest integer.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the number to round"), + ), + types.Named("y", types.N).Description("the result of rounding `x` _up_"), + ), + Categories: number, +} + +var Floor = &Builtin{ + Name: "floor", + Description: "Rounds the number _down_ to the nearest integer.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the number to round"), + ), + types.Named("y", types.N).Description("the result of rounding `x` _down_"), + ), + Categories: number, +} + +var Abs = &Builtin{ + Name: "abs", + Description: "Returns the number without its sign.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the number to take the absolute value of"), + ), + types.Named("y", types.N).Description("the absolute value of `x`"), + ), + Categories: number, +} + +var Rem = &Builtin{ + Name: "rem", + Infix: "%", + Description: "Returns the remainder for of `x` divided by `y`, for `y != 0`.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N), + types.Named("y", types.N), + ), + types.Named("z", types.N).Description("the remainder"), + ), + Categories: number, +} + +/** + * Bitwise + */ + +var BitsOr = &Builtin{ + Name: "bits.or", + Description: "Returns the bitwise 
\"OR\" of two integers.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the first integer"), + types.Named("y", types.N).Description("the second integer"), + ), + types.Named("z", types.N).Description("the bitwise OR of `x` and `y`"), + ), +} + +var BitsAnd = &Builtin{ + Name: "bits.and", + Description: "Returns the bitwise \"AND\" of two integers.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the first integer"), + types.Named("y", types.N).Description("the second integer"), + ), + types.Named("z", types.N).Description("the bitwise AND of `x` and `y`"), + ), +} + +var BitsNegate = &Builtin{ + Name: "bits.negate", + Description: "Returns the bitwise negation (flip) of an integer.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the integer to negate"), + ), + types.Named("z", types.N).Description("the bitwise negation of `x`"), + ), +} + +var BitsXOr = &Builtin{ + Name: "bits.xor", + Description: "Returns the bitwise \"XOR\" (exclusive-or) of two integers.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the first integer"), + types.Named("y", types.N).Description("the second integer"), + ), + types.Named("z", types.N).Description("the bitwise XOR of `x` and `y`"), + ), +} + +var BitsShiftLeft = &Builtin{ + Name: "bits.lsh", + Description: "Returns a new integer with its bits shifted `s` bits to the left.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the integer to shift"), + types.Named("s", types.N).Description("the number of bits to shift"), + ), + types.Named("z", types.N).Description("the result of shifting `x` `s` bits to the left"), + ), +} + +var BitsShiftRight = &Builtin{ + Name: "bits.rsh", + Description: "Returns a new integer with its bits shifted `s` bits to the right.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the integer to shift"), + types.Named("s", types.N).Description("the number of bits to shift"), + ), + types.Named("z", types.N).Description("the result of shifting `x` `s` bits to the right"), + ), +} + +/** + * Sets + */ + +var sets = category("sets") + +var And = &Builtin{ + Name: "and", + Infix: "&", + Description: "Returns the intersection of two sets.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.SetOfAny).Description("the first set"), + types.Named("y", types.SetOfAny).Description("the second set"), + ), + types.Named("z", types.SetOfAny).Description("the intersection of `x` and `y`"), + ), + Categories: sets, +} + +// Or performs a union operation on sets. 
+var Or = &Builtin{ + Name: "or", + Infix: "|", + Description: "Returns the union of two sets.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.SetOfAny), + types.Named("y", types.SetOfAny), + ), + types.Named("z", types.SetOfAny).Description("the union of `x` and `y`"), + ), + Categories: sets, +} + +var Intersection = &Builtin{ + Name: "intersection", + Description: "Returns the intersection of the given input sets.", + Decl: types.NewFunction( + types.Args( + types.Named("xs", types.NewSet(types.SetOfAny)).Description("set of sets to intersect"), + ), + types.Named("y", types.SetOfAny).Description("the intersection of all `xs` sets"), + ), + Categories: sets, +} + +var Union = &Builtin{ + Name: "union", + Description: "Returns the union of the given input sets.", + Decl: types.NewFunction( + types.Args( + types.Named("xs", types.NewSet(types.SetOfAny)).Description("set of sets to merge"), + ), + types.Named("y", types.SetOfAny).Description("the union of all `xs` sets"), + ), + Categories: sets, +} + +/** + * Aggregates + */ + +var aggregates = category("aggregates") + +var Count = &Builtin{ + Name: "count", + Description: " Count takes a collection or string and returns the number of elements (or characters) in it.", + Decl: types.NewFunction( + types.Args( + types.Named("collection", types.NewAny( + types.SetOfAny, + types.NewArray(nil, types.A), + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + types.S, + )).Description("the set/array/object/string to be counted"), + ), + types.Named("n", types.N).Description("the count of elements, key/val pairs, or characters, respectively."), + ), + Categories: aggregates, +} + +var Sum = &Builtin{ + Name: "sum", + Description: "Sums elements of an array or set of numbers.", + Decl: types.NewFunction( + types.Args( + types.Named("collection", types.NewAny( + types.SetOfNum, + types.NewArray(nil, types.N), + )).Description("the set or array of numbers to sum"), + ), + types.Named("n", types.N).Description("the sum of all elements"), + ), + Categories: aggregates, +} + +var Product = &Builtin{ + Name: "product", + Description: "Multiplies elements of an array or set of numbers", + Decl: types.NewFunction( + types.Args( + types.Named("collection", types.NewAny( + types.SetOfNum, + types.NewArray(nil, types.N), + )).Description("the set or array of numbers to multiply"), + ), + types.Named("n", types.N).Description("the product of all elements"), + ), + Categories: aggregates, +} + +var Max = &Builtin{ + Name: "max", + Description: "Returns the maximum value in a collection.", + Decl: types.NewFunction( + types.Args( + types.Named("collection", types.NewAny( + types.SetOfAny, + types.NewArray(nil, types.A), + )).Description("the set or array to be searched"), + ), + types.Named("n", types.A).Description("the maximum of all elements"), + ), + Categories: aggregates, +} + +var Min = &Builtin{ + Name: "min", + Description: "Returns the minimum value in a collection.", + Decl: types.NewFunction( + types.Args( + types.Named("collection", types.NewAny( + types.SetOfAny, + types.NewArray(nil, types.A), + )).Description("the set or array to be searched"), + ), + types.Named("n", types.A).Description("the minimum of all elements"), + ), + Categories: aggregates, +} + +/** + * Sorting + */ + +var Sort = &Builtin{ + Name: "sort", + Description: "Returns a sorted array.", + Decl: types.NewFunction( + types.Args( + types.Named("collection", types.NewAny( + types.NewArray(nil, types.A), + types.SetOfAny, + )).Description("the 
array or set to be sorted"), + ), + types.Named("n", types.NewArray(nil, types.A)).Description("the sorted array"), + ), + Categories: aggregates, +} + +/** + * Arrays + */ + +var ArrayConcat = &Builtin{ + Name: "array.concat", + Description: "Concatenates two arrays.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.NewArray(nil, types.A)).Description("the first array"), + types.Named("y", types.NewArray(nil, types.A)).Description("the second array"), + ), + types.Named("z", types.NewArray(nil, types.A)).Description("the concatenation of `x` and `y`"), + ), +} + +var ArraySlice = &Builtin{ + Name: "array.slice", + Description: "Returns a slice of a given array. If `start` is greater or equal than `stop`, `slice` is `[]`.", + Decl: types.NewFunction( + types.Args( + types.Named("arr", types.NewArray(nil, types.A)).Description("the array to be sliced"), + types.Named("start", types.N).Description("the start index of the returned slice; if less than zero, it's clamped to 0"), + types.Named("stop", types.N).Description("the stop index of the returned slice; if larger than `count(arr)`, it's clamped to `count(arr)`"), + ), + types.Named("slice", types.NewArray(nil, types.A)).Description("the subslice of `array`, from `start` to `end`, including `arr[start]`, but excluding `arr[end]`"), + ), +} // NOTE(sr): this function really needs examples + +var ArrayReverse = &Builtin{ + Name: "array.reverse", + Description: "Returns the reverse of a given array.", + Decl: types.NewFunction( + types.Args( + types.Named("arr", types.NewArray(nil, types.A)).Description("the array to be reversed"), + ), + types.Named("rev", types.NewArray(nil, types.A)).Description("an array containing the elements of `arr` in reverse order"), + ), +} + +/** + * Conversions + */ +var conversions = category("conversions") + +var ToNumber = &Builtin{ + Name: "to_number", + Description: "Converts a string, bool, or number value to a number: Strings are converted to numbers using `strconv.Atoi`, Boolean `false` is converted to 0 and `true` is converted to 1.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.NewAny( + types.N, + types.S, + types.B, + types.NewNull(), + )).Description("value to convert"), + ), + types.Named("num", types.N).Description("the numeric representation of `x`"), + ), + Categories: conversions, +} + +/** + * Regular Expressions + */ + +var RegexMatch = &Builtin{ + Name: "regex.match", + Description: "Matches a string against a regular expression.", + Decl: types.NewFunction( + types.Args( + types.Named("pattern", types.S).Description("regular expression"), + types.Named("value", types.S).Description("value to match against `pattern`"), + ), + types.Named("result", types.B).Description("true if `value` matches `pattern`"), + ), +} + +var RegexIsValid = &Builtin{ + Name: "regex.is_valid", + Description: "Checks if a string is a valid regular expression: the detailed syntax for patterns is defined by https://github.com/google/re2/wiki/Syntax.", + Decl: types.NewFunction( + types.Args( + types.Named("pattern", types.S).Description("regular expression"), + ), + types.Named("result", types.B).Description("true if `pattern` is a valid regular expression"), + ), +} + +var RegexFindAllStringSubmatch = &Builtin{ + Name: "regex.find_all_string_submatch_n", + Description: "Returns all successive matches of the expression.", + Decl: types.NewFunction( + types.Args( + types.Named("pattern", types.S).Description("regular expression"), + types.Named("value", types.S).Description("string 
to match"), + types.Named("number", types.N).Description("number of matches to return; `-1` means all matches"), + ), + types.Named("output", types.NewArray(nil, types.NewArray(nil, types.S))).Description("array of all matches"), + ), +} + +var RegexTemplateMatch = &Builtin{ + Name: "regex.template_match", + Description: "Matches a string against a pattern, where there pattern may be glob-like", + Decl: types.NewFunction( + types.Args( + types.Named("template", types.S).Description("template expression containing `0..n` regular expressions"), + types.Named("value", types.S).Description("string to match"), + types.Named("delimiter_start", types.S).Description("start delimiter of the regular expression in `template`"), + types.Named("delimiter_end", types.S).Description("end delimiter of the regular expression in `template`"), + ), + types.Named("result", types.B).Description("true if `value` matches the `template`"), + ), +} // TODO(sr): example:`regex.template_match("urn:foo:{.*}", "urn:foo:bar:baz", "{", "}")`` returns ``true``. + +var RegexSplit = &Builtin{ + Name: "regex.split", + Description: "Splits the input string by the occurrences of the given pattern.", + Decl: types.NewFunction( + types.Args( + types.Named("pattern", types.S).Description("regular expression"), + types.Named("value", types.S).Description("string to match"), + ), + types.Named("output", types.NewArray(nil, types.S)).Description("the parts obtained by splitting `value`"), + ), +} + +// RegexFind takes two strings and a number, the pattern, the value and number of match values to +// return, -1 means all match values. +var RegexFind = &Builtin{ + Name: "regex.find_n", + Description: "Returns the specified number of matches when matching the input against the pattern.", + Decl: types.NewFunction( + types.Args( + types.Named("pattern", types.S).Description("regular expression"), + types.Named("value", types.S).Description("string to match"), + types.Named("number", types.N).Description("number of matches to return, if `-1`, returns all matches"), + ), + types.Named("output", types.NewArray(nil, types.S)).Description("collected matches"), + ), +} + +// GlobsMatch takes two strings regexp-style strings and evaluates to true if their +// intersection matches a non-empty set of non-empty strings. +// Examples: +// - "a.a." and ".b.b" -> true. +// - "[a-z]*" and [0-9]+" -> not true. +var GlobsMatch = &Builtin{ + Name: "regex.globs_match", + Description: `Checks if the intersection of two glob-style regular expressions matches a non-empty set of non-empty strings. 
+The set of regex symbols is limited for this builtin: only ` + "`.`, `*`, `+`, `[`, `-`, `]` and `\\` are treated as special symbols.", + Decl: types.NewFunction( + types.Args( + types.Named("glob1", types.S).Description("first glob-style regular expression"), + types.Named("glob2", types.S).Description("second glob-style regular expression"), + ), + types.Named("result", types.B).Description("true if the intersection of `glob1` and `glob2` matches a non-empty set of non-empty strings"), + ), +} + +/** + * Strings + */ +var stringsCat = category("strings") + +var AnyPrefixMatch = &Builtin{ + Name: "strings.any_prefix_match", + Description: "Returns true if any of the search strings begins with any of the base strings.", + Decl: types.NewFunction( + types.Args( + types.Named("search", types.NewAny( + types.S, + types.SetOfStr, + types.NewArray(nil, types.S), + )).Description("search string(s)"), + types.Named("base", types.NewAny( + types.S, + types.SetOfStr, + types.NewArray(nil, types.S), + )).Description("base string(s)"), + ), + types.Named("result", types.B).Description("result of the prefix check"), + ), + Categories: stringsCat, +} + +var AnySuffixMatch = &Builtin{ + Name: "strings.any_suffix_match", + Description: "Returns true if any of the search strings ends with any of the base strings.", + Decl: types.NewFunction( + types.Args( + types.Named("search", types.NewAny( + types.S, + types.SetOfStr, + types.NewArray(nil, types.S), + )).Description("search string(s)"), + types.Named("base", types.NewAny( + types.S, + types.SetOfStr, + types.NewArray(nil, types.S), + )).Description("base string(s)"), + ), + types.Named("result", types.B).Description("result of the suffix check"), + ), + Categories: stringsCat, +} + +var Concat = &Builtin{ + Name: "concat", + Description: "Joins a set or array of strings with a delimiter.", + Decl: types.NewFunction( + types.Args( + types.Named("delimiter", types.S).Description("string to use as a delimiter"), + types.Named("collection", types.NewAny( + types.SetOfStr, + types.NewArray(nil, types.S), + )).Description("strings to join"), + ), + types.Named("output", types.S).Description("the joined string"), + ), + Categories: stringsCat, +} + +var FormatInt = &Builtin{ + Name: "format_int", + Description: "Returns the string representation of the number in the given base after rounding it down to an integer value.", + Decl: types.NewFunction( + types.Args( + types.Named("number", types.N).Description("number to format"), + types.Named("base", types.N).Description("base of number representation to use"), + ), + types.Named("output", types.S).Description("formatted number"), + ), + Categories: stringsCat, +} + +var IndexOf = &Builtin{ + Name: "indexof", + Description: "Returns the index of a substring contained inside a string.", + Decl: types.NewFunction( + types.Args( + types.Named("haystack", types.S).Description("string to search in"), + types.Named("needle", types.S).Description("substring to look for"), + ), + types.Named("output", types.N).Description("index of first occurrence, `-1` if not found"), + ), + Categories: stringsCat, +} + +var IndexOfN = &Builtin{ + Name: "indexof_n", + Description: "Returns a list of all the indexes of a substring contained inside a string.", + Decl: types.NewFunction( + types.Args( + types.Named("haystack", types.S).Description("string to search in"), + types.Named("needle", types.S).Description("substring to look for"), + ), + types.Named("output", types.NewArray(nil, types.N)).Description("all indices at which 
`needle` occurs in `haystack`, may be empty"), + ), + Categories: stringsCat, +} + +var Substring = &Builtin{ + Name: "substring", + Description: "Returns the portion of a string for a given `offset` and a `length`. If `length < 0`, `output` is the remainder of the string.", + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("string to extract substring from"), + types.Named("offset", types.N).Description("offset, must be positive"), + types.Named("length", types.N).Description("length of the substring starting from `offset`"), + ), + types.Named("output", types.S).Description("substring of `value` from `offset`, of length `length`"), + ), + Categories: stringsCat, +} + +var Contains = &Builtin{ + Name: "contains", + Description: "Returns `true` if the search string is included in the base string", + Decl: types.NewFunction( + types.Args( + types.Named("haystack", types.S).Description("string to search in"), + types.Named("needle", types.S).Description("substring to look for"), + ), + types.Named("result", types.B).Description("result of the containment check"), + ), + Categories: stringsCat, +} + +var StringCount = &Builtin{ + Name: "strings.count", + Description: "Returns the number of non-overlapping instances of a substring in a string.", + Decl: types.NewFunction( + types.Args( + types.Named("search", types.S).Description("string to search in"), + types.Named("substring", types.S).Description("substring to look for"), + ), + types.Named("output", types.N).Description("count of occurrences, `0` if not found"), + ), + Categories: stringsCat, +} + +var StartsWith = &Builtin{ + Name: "startswith", + Description: "Returns true if the search string begins with the base string.", + Decl: types.NewFunction( + types.Args( + types.Named("search", types.S).Description("search string"), + types.Named("base", types.S).Description("base string"), + ), + types.Named("result", types.B).Description("result of the prefix check"), + ), + Categories: stringsCat, +} + +var EndsWith = &Builtin{ + Name: "endswith", + Description: "Returns true if the search string ends with the base string.", + Decl: types.NewFunction( + types.Args( + types.Named("search", types.S).Description("search string"), + types.Named("base", types.S).Description("base string"), + ), + types.Named("result", types.B).Description("result of the suffix check"), + ), + Categories: stringsCat, +} + +var Lower = &Builtin{ + Name: "lower", + Description: "Returns the input string but with all characters in lower-case.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string that is converted to lower-case"), + ), + types.Named("y", types.S).Description("lower-case of x"), + ), + Categories: stringsCat, +} + +var Upper = &Builtin{ + Name: "upper", + Description: "Returns the input string but with all characters in upper-case.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string that is converted to upper-case"), + ), + types.Named("y", types.S).Description("upper-case of x"), + ), + Categories: stringsCat, +} + +var Split = &Builtin{ + Name: "split", + Description: "Split returns an array containing elements of the input string split on a delimiter.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string that is split"), + types.Named("delimiter", types.S).Description("delimiter used for splitting"), + ), + types.Named("ys", types.NewArray(nil, types.S)).Description("split parts"), + ), + Categories: 
stringsCat, +} + +var Replace = &Builtin{ + Name: "replace", + Description: "Replace replaces all instances of a sub-string.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string being processed"), + types.Named("old", types.S).Description("substring to replace"), + types.Named("new", types.S).Description("string to replace `old` with"), + ), + types.Named("y", types.S).Description("string with replaced substrings"), + ), + Categories: stringsCat, +} + +var ReplaceN = &Builtin{ + Name: "strings.replace_n", + Description: `Replaces a string from a list of old, new string pairs. +Replacements are performed in the order they appear in the target string, without overlapping matches. +The old string comparisons are done in argument order.`, + Decl: types.NewFunction( + types.Args( + types.Named("patterns", types.NewObject( + nil, + types.NewDynamicProperty( + types.S, + types.S)), + ).Description("replacement pairs"), + types.Named("value", types.S).Description("string to replace substring matches in"), + ), + types.Named("output", types.S).Description("string with replaced substrings"), + ), +} + +var RegexReplace = &Builtin{ + Name: "regex.replace", + Description: `Find and replaces the text using the regular expression pattern.`, + Decl: types.NewFunction( + types.Args( + types.Named("s", types.S).Description("string being processed"), + types.Named("pattern", types.S).Description("regex pattern to be applied"), + types.Named("value", types.S).Description("regex value"), + ), + types.Named("output", types.S).Description("string with replaced substrings"), + ), +} + +var Trim = &Builtin{ + Name: "trim", + Description: "Returns `value` with all leading or trailing instances of the `cutset` characters removed.", + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("string to trim"), + types.Named("cutset", types.S).Description("string of characters that are cut off"), + ), + types.Named("output", types.S).Description("string trimmed of `cutset` characters"), + ), + Categories: stringsCat, +} + +var TrimLeft = &Builtin{ + Name: "trim_left", + Description: "Returns `value` with all leading instances of the `cutset` characters removed.", + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("string to trim"), + types.Named("cutset", types.S).Description("string of characters that are cut off on the left"), + ), + types.Named("output", types.S).Description("string left-trimmed of `cutset` characters"), + ), + Categories: stringsCat, +} + +var TrimPrefix = &Builtin{ + Name: "trim_prefix", + Description: "Returns `value` without the prefix. 
If `value` doesn't start with `prefix`, it is returned unchanged.", + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("string to trim"), + types.Named("prefix", types.S).Description("prefix to cut off"), + ), + types.Named("output", types.S).Description("string with `prefix` cut off"), + ), + Categories: stringsCat, +} + +var TrimRight = &Builtin{ + Name: "trim_right", + Description: "Returns `value` with all trailing instances of the `cutset` characters removed.", + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("string to trim"), + types.Named("cutset", types.S).Description("string of characters that are cut off on the right"), + ), + types.Named("output", types.S).Description("string right-trimmed of `cutset` characters"), + ), + Categories: stringsCat, +} + +var TrimSuffix = &Builtin{ + Name: "trim_suffix", + Description: "Returns `value` without the suffix. If `value` doesn't end with `suffix`, it is returned unchanged.", + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("string to trim"), + types.Named("suffix", types.S).Description("suffix to cut off"), + ), + types.Named("output", types.S).Description("string with `suffix` cut off"), + ), + Categories: stringsCat, +} + +var TrimSpace = &Builtin{ + Name: "trim_space", + Description: "Return the given string with all leading and trailing white space removed.", + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("string to trim"), + ), + types.Named("output", types.S).Description("string leading and trailing white space cut off"), + ), + Categories: stringsCat, +} + +var Sprintf = &Builtin{ + Name: "sprintf", + Description: "Returns the given string, formatted.", + Decl: types.NewFunction( + types.Args( + types.Named("format", types.S).Description("string with formatting verbs"), + types.Named("values", types.NewArray(nil, types.A)).Description("arguments to format into formatting verbs"), + ), + types.Named("output", types.S).Description("`format` formatted by the values in `values`"), + ), + Categories: stringsCat, +} + +var StringReverse = &Builtin{ + Name: "strings.reverse", + Description: "Reverses a given string.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string to reverse"), + ), + types.Named("y", types.S).Description("reversed string"), + ), + Categories: stringsCat, +} + +var RenderTemplate = &Builtin{ + Name: "strings.render_template", + Description: `Renders a templated string with given template variables injected. For a given templated string and key/value mapping, values will be injected into the template where they are referenced by key. + For examples of templating syntax, see https://pkg.go.dev/text/template`, + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("a templated string"), + types.Named("vars", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("a mapping of template variable keys to values"), + ), + types.Named("result", types.S).Description("rendered template with template variables injected"), + ), + Categories: stringsCat, +} + +/** + * Numbers + */ + +// RandIntn returns a random number 0 - n +// Marked non-deterministic because it relies on RNG internally. +var RandIntn = &Builtin{ + Name: "rand.intn", + Description: "Returns a random integer between `0` and `n` (`n` exclusive). If `n` is `0`, then `y` is always `0`. 
For any given argument pair (`str`, `n`), the output will be consistent throughout a query evaluation.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("str", types.S).Description("seed string for the random number"),
+			types.Named("n", types.N).Description("upper bound of the random number (exclusive)"),
+		),
+		types.Named("y", types.N).Description("random integer in the range `[0, abs(n))`"),
+	),
+	Categories: number,
+	Nondeterministic: true,
+}
+
+var NumbersRange = &Builtin{
+	Name: "numbers.range",
+	Description: "Returns an array of numbers in the given (inclusive) range. If `a==b`, then `range == [a]`; if `a > b`, then `range` is in descending order.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("a", types.N).Description("the start of the range"),
+			types.Named("b", types.N).Description("the end of the range (inclusive)"),
+		),
+		types.Named("range", types.NewArray(nil, types.N)).Description("the range between `a` and `b`"),
+	),
+}
+
+var NumbersRangeStep = &Builtin{
+	Name: "numbers.range_step",
+	Description: `Returns an array of numbers in the given (inclusive) range incremented by a positive step.
+	If "a==b", then "range == [a]"; if "a > b", then "range" is in descending order.
+	If the provided "step" is less than 1, an error will be thrown.
+	If "b" is not in the range of the provided "step", "b" won't be included in the result.
+	`,
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("a", types.N).Description("the start of the range"),
+			types.Named("b", types.N).Description("the end of the range (inclusive)"),
+			types.Named("step", types.N).Description("the step between numbers in the range"),
+		),
+		types.Named("range", types.NewArray(nil, types.N)).Description("the range between `a` and `b` in `step` increments"),
+	),
+}
+
+/**
+ * Units
+ */
+
+var UnitsParse = &Builtin{
+	Name: "units.parse",
+	Description: `Converts strings like "10G", "5K", "4M", "1500m", and the like into a number.
+This number can be a non-integer, such as 1.5, 0.22, etc. Scientific notation is supported,
+allowing values such as "1e-3K" (1) or "2.5e6M" (2.5 million M).
+
+Supports standard metric decimal and binary SI units (e.g., K, Ki, M, Mi, G, Gi, etc.) where
+m, K, M, G, T, P, and E are treated as decimal units and Ki, Mi, Gi, Ti, Pi, and Ei are treated as
+binary units.
+
+Note that 'm' and 'M' are case-sensitive to allow distinguishing between "milli" and "mega" units
+respectively. Other units are case-insensitive.`,
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.S).Description("the unit to parse"),
+		),
+		types.Named("y", types.N).Description("the parsed number"),
+	),
+}
+
+var UnitsParseBytes = &Builtin{
+	Name: "units.parse_bytes",
+	Description: `Converts strings like "10GB", "5K", "4mb", or "1e6KB" into an integer number of bytes.
+
+Supports standard byte units (e.g., KB, KiB, etc.) where KB, MB, GB, and TB are treated as decimal
+units, and KiB, MiB, GiB, and TiB are treated as binary units. Scientific notation is supported,
+enabling values like "1.5e3MB" (1500MB) or "2e6GiB" (2 million GiB).
+
+The bytes symbol (b/B) in the unit is optional; omitting it will yield the same result (e.g., "Mi"
+and "MiB" are equivalent).`,
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.S).Description("the byte unit to parse"),
+		),
+		types.Named("y", types.N).Description("the parsed number"),
+	),
+}
+
+//
+/**
+ * Type
+ */
+
+// UUIDRFC4122 returns a version 4 UUID string.
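+//
+// Example (editor's sketch, not part of upstream OPA): in Rego, the seed
+// argument pins the result within one evaluation:
+//
+//	id1 := uuid.rfc4122("seed")
+//	id2 := uuid.rfc4122("seed")
+//
+// id1 == id2 always holds inside a single query evaluation, while separate
+// evaluations (or different seeds) generally produce different UUIDs.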
+// Marked non-deterministic because it relies on RNG internally.
+var UUIDRFC4122 = &Builtin{
+	Name: "uuid.rfc4122",
+	Description: "Returns a new UUIDv4.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("k", types.S).Description("seed string"),
+		),
+		types.Named("output", types.S).Description("a version 4 UUID; for any given `k`, the output will be consistent throughout a query evaluation"),
+	),
+	Nondeterministic: true,
+}
+
+var UUIDParse = &Builtin{
+	Name: "uuid.parse",
+	Description: "Parses the string value as a UUID and returns an object with the well-defined fields of the UUID if valid.",
+	Categories: nil,
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("uuid", types.S).Description("UUID string to parse"),
+		),
+		types.Named("result", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("Properties of UUID if valid (version, variant, etc). Undefined otherwise."),
+	),
+	Relation: false,
+}
+
+/**
+ * JSON
+ */
+
+var objectCat = category("object")
+
+var JSONFilter = &Builtin{
+	Name: "json.filter",
+	Description: "Filters the object. " +
+		"For example: `json.filter({\"a\": {\"b\": \"x\", \"c\": \"y\"}}, [\"a/b\"])` will result in `{\"a\": {\"b\": \"x\"}}`. " +
+		"Paths are not filtered in-order and are deduplicated before being evaluated.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("object", types.NewObject(
+				nil,
+				types.NewDynamicProperty(types.A, types.A),
+			)).Description("object to filter"),
+			types.Named("paths", types.NewAny(
+				types.NewArray(
+					nil,
+					types.NewAny(
+						types.S,
+						types.NewArray(
+							nil,
+							types.A,
+						),
+					),
+				),
+				types.NewSet(
+					types.NewAny(
+						types.S,
+						types.NewArray(
+							nil,
+							types.A,
+						),
+					),
+				),
+			)).Description("JSON string paths"),
+		),
+		types.Named("filtered", types.A).Description("remaining data from `object` with only keys specified in `paths`"),
+	),
+	Categories: objectCat,
+}
+
+var JSONRemove = &Builtin{
+	Name: "json.remove",
+	Description: "Removes paths from an object. " +
+		"For example: `json.remove({\"a\": {\"b\": \"x\", \"c\": \"y\"}}, [\"a/b\"])` will result in `{\"a\": {\"c\": \"y\"}}`. " +
+		"Paths are not removed in-order and are deduplicated before being evaluated.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("object", types.NewObject(
+				nil,
+				types.NewDynamicProperty(types.A, types.A),
+			)).Description("object to remove paths from"),
+			types.Named("paths", types.NewAny(
+				types.NewArray(
+					nil,
+					types.NewAny(
+						types.S,
+						types.NewArray(
+							nil,
+							types.A,
+						),
+					),
+				),
+				types.NewSet(
+					types.NewAny(
+						types.S,
+						types.NewArray(
+							nil,
+							types.A,
+						),
+					),
+				),
+			)).Description("JSON string paths"),
+		),
+		types.Named("output", types.A).Description("result of removing all keys specified in `paths`"),
+	),
+	Categories: objectCat,
+}
+
+var JSONPatch = &Builtin{
+	Name: "json.patch",
+	Description: "Patches an object according to RFC6902. " +
+		"For example: `json.patch({\"a\": {\"foo\": 1}}, [{\"op\": \"add\", \"path\": \"/a/bar\", \"value\": 2}])` results in `{\"a\": {\"foo\": 1, \"bar\": 2}}`. " +
+		"The patches are applied atomically: if any of them fails, the result will be undefined. " +
+		"Additionally works on sets, where a value contained in the set is considered to be its path.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("object", types.A).Description("the object to patch"), // TODO(sr): types.A?
+			types.Named("patches", types.NewArray(
+				nil,
+				types.NewObject(
+					[]*types.StaticProperty{
+						{Key: "op", Value: types.S},
+						{Key: "path", Value: types.A},
+					},
+					types.NewDynamicProperty(types.A, types.A),
+				),
+			)).Description("the JSON patches to apply"),
+		),
+		types.Named("output", types.A).Description("result obtained after consecutively applying all patch operations in `patches`"),
+	),
+	Categories: objectCat,
+}
+
+var ObjectSubset = &Builtin{
+	Name: "object.subset",
+	Description: "Determines if an object `sub` is a subset of another object `super`. " +
+		"Object `sub` is a subset of object `super` if and only if every key in `sub` is also in `super`, " +
+		"**and** for all keys which `sub` and `super` share, they have the same value. " +
+		"This function works with objects, sets, and arrays; `super` may also be an array while `sub` is a set. " +
+		"If both arguments are objects, then the operation is recursive, e.g. " +
+		"`{\"c\": {\"x\": {10, 15, 20}}}` is a subset of `{\"a\": \"b\", \"c\": {\"x\": {10, 15, 20, 25}, \"y\": \"z\"}}`. " +
+		"If both arguments are sets, then this function checks if every element of `sub` is a member of `super`, " +
+		"but does not attempt to recurse. If both arguments are arrays, " +
+		"then this function checks if `sub` appears contiguously in order within `super`, " +
+		"and also does not attempt to recurse. If `super` is an array and `sub` is a set, " +
+		"then this function checks if `super` contains every element of `sub` with no consideration of ordering, " +
+		"and also does not attempt to recurse.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("super", types.NewAny(types.NewObject(
+				nil,
+				types.NewDynamicProperty(types.A, types.A),
+			), types.SetOfAny,
+				types.NewArray(nil, types.A),
+			)).Description("object to test if sub is a subset of"),
+			types.Named("sub", types.NewAny(types.NewObject(
+				nil,
+				types.NewDynamicProperty(types.A, types.A),
+			), types.SetOfAny,
+				types.NewArray(nil, types.A),
+			)).Description("object to test if super is a superset of"),
+		),
+		types.Named("result", types.A).Description("`true` if `sub` is a subset of `super`"),
+	),
+}
+
+var ObjectUnion = &Builtin{
+	Name: "object.union",
+	Description: "Creates a new object of the asymmetric union of two objects. " +
+		"For example: `object.union({\"a\": 1, \"b\": 2, \"c\": {\"d\": 3}}, {\"a\": 7, \"c\": {\"d\": 4, \"e\": 5}})` will result in `{\"a\": 7, \"b\": 2, \"c\": {\"d\": 4, \"e\": 5}}`.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("a", types.NewObject(
+				nil,
+				types.NewDynamicProperty(types.A, types.A),
+			)).Description("left-hand object"),
+			types.Named("b", types.NewObject(
+				nil,
+				types.NewDynamicProperty(types.A, types.A),
+			)).Description("right-hand object"),
+		),
+		types.Named("output", types.A).Description("a new object which is the result of an asymmetric recursive union of two objects where conflicts are resolved by choosing the key from the right-hand object `b`"),
+	), // TODO(sr): types.A? ^^^^^^^ (also below)
+}
+
+var ObjectUnionN = &Builtin{
+	Name: "object.union_n",
+	Description: "Creates a new object that is the asymmetric union of all objects merged from left to right. " +
+		"For example: `object.union_n([{\"a\": 1}, {\"b\": 2}, {\"a\": 3}])` will result in `{\"b\": 2, \"a\": 3}`.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("objects", types.NewArray(
+				nil,
+				types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+			)).Description("list of objects to merge"),
+		),
+		types.Named("output", types.A).Description("asymmetric recursive union of all objects in `objects`, merged from left to right, where conflicts are resolved by choosing the key from the right-hand object"),
+	),
+}
+
+var ObjectRemove = &Builtin{
+	Name: "object.remove",
+	Description: "Removes specified keys from an object.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("object", types.NewObject(
+				nil,
+				types.NewDynamicProperty(types.A, types.A),
+			)).Description("object to remove keys from"),
+			types.Named("keys", types.NewAny(
+				types.NewArray(nil, types.A),
+				types.SetOfAny,
+				types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+			)).Description("keys to remove from `object`"),
+		),
+		types.Named("output", types.A).Description("result of removing the specified `keys` from `object`"),
+	),
+}
+
+var ObjectFilter = &Builtin{
+	Name: "object.filter",
+	Description: "Filters the object by keeping only specified keys. " +
+		"For example: `object.filter({\"a\": {\"b\": \"x\", \"c\": \"y\"}, \"d\": \"z\"}, [\"a\"])` will result in `{\"a\": {\"b\": \"x\", \"c\": \"y\"}}`.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("object", types.NewObject(
+				nil,
+				types.NewDynamicProperty(types.A, types.A),
+			)).Description("object to filter keys"),
+			types.Named("keys", types.NewAny(
+				types.NewArray(nil, types.A),
+				types.SetOfAny,
+				types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+			)).Description("keys to keep in `object`"),
+		),
+		types.Named("filtered", types.A).Description("remaining data from `object` with only keys specified in `keys`"),
+	),
+}
+
+var ObjectGet = &Builtin{
+	Name: "object.get",
+	Description: "Returns value of an object's key if present, otherwise a default. " +
+		"If the supplied `key` is an `array`, then `object.get` will search through a nested object or array using each key in turn. " +
+		"For example: `object.get({\"a\": [{ \"b\": true }]}, [\"a\", 0, \"b\"], false)` results in `true`.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("object", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("object to get `key` from"),
+			types.Named("key", types.A).Description("key to lookup in `object`"),
+			types.Named("default", types.A).Description("default to use when lookup fails"),
+		),
+		types.Named("value", types.A).Description("`object[key]` if present, otherwise `default`"),
+	),
+}
+
+var ObjectKeys = &Builtin{
+	Name: "object.keys",
+	Description: "Returns a set of an object's keys. " +
+		"For example: `object.keys({\"a\": 1, \"b\": true, \"c\": \"d\"})` results in `{\"a\", \"b\", \"c\"}`.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("object", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("object to get keys from"),
+		),
+		types.Named("value", types.SetOfAny).Description("set of `object`'s keys"),
+	),
+}
+
+/*
+ * Encoding
+ */
+var encoding = category("encoding")
+
+var JSONMarshal = &Builtin{
+	Name: "json.marshal",
+	Description: "Serializes the input term to JSON.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.A).Description("the term to serialize"),
+		),
+		types.Named("y", types.S).Description("the JSON string representation of `x`"),
+	),
+	Categories: encoding,
+}
+
+var JSONMarshalWithOptions = &Builtin{
+	Name: "json.marshal_with_options",
+	Description: "Serializes the input term to JSON, with additional formatting options via the `opts` parameter. " +
+		"`opts` accepts keys `pretty` (enable multi-line/formatted JSON), `prefix` (string to prefix lines with, default empty string) and `indent` (string to indent with, default `\\t`).",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.A).Description("the term to serialize"),
+			types.Named("opts", types.NewObject(
+				[]*types.StaticProperty{
+					types.NewStaticProperty("pretty", types.B),
+					types.NewStaticProperty("indent", types.S),
+					types.NewStaticProperty("prefix", types.S),
+				},
+				types.NewDynamicProperty(types.S, types.A),
+			)).Description("encoding options"),
+		),
+		types.Named("y", types.S).Description("the JSON string representation of `x`, with configured prefix/indent string(s) as appropriate"),
+	),
+	Categories: encoding,
+}
+
+var JSONUnmarshal = &Builtin{
+	Name: "json.unmarshal",
+	Description: "Deserializes the input string.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.S).Description("a JSON string"),
+		),
+		types.Named("y", types.A).Description("the term deserialized from `x`"),
+	),
+	Categories: encoding,
+}
+
+var JSONIsValid = &Builtin{
+	Name: "json.is_valid",
+	Description: "Verifies the input string is a valid JSON document.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.S).Description("a JSON string"),
+		),
+		types.Named("result", types.B).Description("`true` if `x` is valid JSON, `false` otherwise"),
+	),
+	Categories: encoding,
+}
+
+var Base64Encode = &Builtin{
+	Name: "base64.encode",
+	Description: "Serializes the input string into base64 encoding.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.S).Description("string to encode"),
+		),
+		types.Named("y", types.S).Description("base64 serialization of `x`"),
+	),
+	Categories: encoding,
+}
+
+var Base64Decode = &Builtin{
+	Name: "base64.decode",
+	Description: "Deserializes the base64 encoded input string.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.S).Description("string to decode"),
+		),
+		types.Named("y", types.S).Description("base64 deserialization of `x`"),
+	),
+	Categories: encoding,
+}
+
+var Base64IsValid = &Builtin{
+	Name: "base64.is_valid",
+	Description: "Verifies the input string is base64 encoded.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.S).Description("string to check"),
+		),
+		types.Named("result", types.B).Description("`true` if `x` is a valid base64 encoded value, `false` otherwise"),
+	),
+	Categories: encoding,
+}
+
+var Base64UrlEncode = &Builtin{
+	Name: "base64url.encode",
+	Description: "Serializes the input string into
base64url encoding.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string to encode"), + ), + types.Named("y", types.S).Description("base64url serialization of `x`"), + ), + Categories: encoding, +} + +var Base64UrlEncodeNoPad = &Builtin{ + Name: "base64url.encode_no_pad", + Description: "Serializes the input string into base64url encoding without padding.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string to encode"), + ), + types.Named("y", types.S).Description("base64url serialization of `x`"), + ), + Categories: encoding, +} + +var Base64UrlDecode = &Builtin{ + Name: "base64url.decode", + Description: "Deserializes the base64url encoded input string.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string to decode"), + ), + types.Named("y", types.S).Description("base64url deserialization of `x`"), + ), + Categories: encoding, +} + +var URLQueryDecode = &Builtin{ + Name: "urlquery.decode", + Description: "Decodes a URL-encoded input string.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("the URL-encoded string"), + ), + types.Named("y", types.S).Description("URL-encoding deserialization of `x`"), + ), + Categories: encoding, +} + +var URLQueryEncode = &Builtin{ + Name: "urlquery.encode", + Description: "Encodes the input string into a URL-encoded string.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("the string to encode"), + ), + types.Named("y", types.S).Description("URL-encoding serialization of `x`"), + ), + Categories: encoding, +} + +var URLQueryEncodeObject = &Builtin{ + Name: "urlquery.encode_object", + Description: "Encodes the given object into a URL encoded query string.", + Decl: types.NewFunction( + types.Args( + types.Named("object", types.NewObject( + nil, + types.NewDynamicProperty( + types.S, + types.NewAny( + types.S, + types.NewArray(nil, types.S), + types.SetOfStr, + ), + ), + ), + ).Description("the object to encode"), + ), + types.Named("y", types.S).Description("the URL-encoded serialization of `object`"), + ), + Categories: encoding, +} + +var URLQueryDecodeObject = &Builtin{ + Name: "urlquery.decode_object", + Description: "Decodes the given URL query string into an object.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("the query string"), + ), + types.Named("object", types.NewObject(nil, types.NewDynamicProperty( + types.S, + types.NewArray(nil, types.S)))).Description("the resulting object"), + ), + Categories: encoding, +} + +var YAMLMarshal = &Builtin{ + Name: "yaml.marshal", + Description: "Serializes the input term to YAML.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A).Description("the term to serialize"), + ), + types.Named("y", types.S).Description("the YAML string representation of `x`"), + ), + Categories: encoding, +} + +var YAMLUnmarshal = &Builtin{ + Name: "yaml.unmarshal", + Description: "Deserializes the input string.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("a YAML string"), + ), + types.Named("y", types.A).Description("the term deserialized from `x`"), + ), + Categories: encoding, +} + +// YAMLIsValid verifies the input string is a valid YAML document. 
+var YAMLIsValid = &Builtin{ + Name: "yaml.is_valid", + Description: "Verifies the input string is a valid YAML document.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("a YAML string"), + ), + types.Named("result", types.B).Description("`true` if `x` is valid YAML, `false` otherwise"), + ), + Categories: encoding, +} + +var HexEncode = &Builtin{ + Name: "hex.encode", + Description: "Serializes the input string using hex-encoding.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string to encode"), + ), + types.Named("y", types.S).Description("serialization of `x` using hex-encoding"), + ), + Categories: encoding, +} + +var HexDecode = &Builtin{ + Name: "hex.decode", + Description: "Deserializes the hex-encoded input string.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("a hex-encoded string"), + ), + types.Named("y", types.S).Description("deserialized from `x`"), + ), + Categories: encoding, +} + +/** + * Tokens + */ +var tokensCat = category("tokens") + +var JWTDecode = &Builtin{ + Name: "io.jwt.decode", + Description: "Decodes a JSON Web Token and outputs it as an object.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token to decode"), + ), + types.Named("output", types.NewArray([]types.Type{ + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + types.S, + }, nil)).Description("`[header, payload, sig]`, where `header` and `payload` are objects; `sig` is the hexadecimal representation of the signature on the token."), + ), + Categories: tokensCat, +} + +var JWTVerifyRS256 = &Builtin{ + Name: "io.jwt.verify_rs256", + Description: "Verifies if a RS256 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, +} + +var JWTVerifyRS384 = &Builtin{ + Name: "io.jwt.verify_rs384", + Description: "Verifies if a RS384 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, +} + +var JWTVerifyRS512 = &Builtin{ + Name: "io.jwt.verify_rs512", + Description: "Verifies if a RS512 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, +} + +var JWTVerifyPS256 = &Builtin{ + Name: "io.jwt.verify_ps256", + Description: "Verifies if a PS256 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", 
types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, +} + +var JWTVerifyPS384 = &Builtin{ + Name: "io.jwt.verify_ps384", + Description: "Verifies if a PS384 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, +} + +var JWTVerifyPS512 = &Builtin{ + Name: "io.jwt.verify_ps512", + Description: "Verifies if a PS512 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, +} + +var JWTVerifyES256 = &Builtin{ + Name: "io.jwt.verify_es256", + Description: "Verifies if a ES256 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, +} + +var JWTVerifyES384 = &Builtin{ + Name: "io.jwt.verify_es384", + Description: "Verifies if a ES384 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, +} + +var JWTVerifyES512 = &Builtin{ + Name: "io.jwt.verify_es512", + Description: "Verifies if a ES512 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, +} + +var JWTVerifyHS256 = &Builtin{ + Name: "io.jwt.verify_hs256", + Description: "Verifies if a HS256 (secret) JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("secret", types.S).Description("plain text secret used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is 
valid, `false` otherwise"), + ), + Categories: tokensCat, +} + +var JWTVerifyHS384 = &Builtin{ + Name: "io.jwt.verify_hs384", + Description: "Verifies if a HS384 (secret) JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("secret", types.S).Description("plain text secret used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, +} + +var JWTVerifyHS512 = &Builtin{ + Name: "io.jwt.verify_hs512", + Description: "Verifies if a HS512 (secret) JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("secret", types.S).Description("plain text secret used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, +} + +// Marked non-deterministic because it relies on time internally. +var JWTDecodeVerify = &Builtin{ + Name: "io.jwt.decode_verify", + Description: `Verifies a JWT signature under parameterized constraints and decodes the claims if it is valid. +Supports the following algorithms: HS256, HS384, HS512, RS256, RS384, RS512, ES256, ES384, ES512, PS256, PS384 and PS512.`, + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified and whose claims are to be checked"), + types.Named("constraints", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("claim verification constraints"), + ), + types.Named("output", types.NewArray([]types.Type{ + types.B, + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + }, nil)).Description("`[valid, header, payload]`: if the input token is verified and meets the requirements of `constraints` then `valid` is `true`; `header` and `payload` are objects containing the JOSE header and the JWT claim set; otherwise, `valid` is `false`, `header` and `payload` are `{}`"), + ), + Categories: tokensCat, + Nondeterministic: true, +} + +var tokenSign = category("tokensign") + +// Marked non-deterministic because it relies on RNG internally. +var JWTEncodeSignRaw = &Builtin{ + Name: "io.jwt.encode_sign_raw", + Description: "Encodes and optionally signs a JSON Web Token.", + Decl: types.NewFunction( + types.Args( + types.Named("headers", types.S).Description("JWS Protected Header"), + types.Named("payload", types.S).Description("JWS Payload"), + types.Named("key", types.S).Description("JSON Web Key (RFC7517)"), + ), + types.Named("output", types.S).Description("signed JWT"), + ), + Categories: tokenSign, + Nondeterministic: true, +} + +// Marked non-deterministic because it relies on RNG internally. +var JWTEncodeSign = &Builtin{ + Name: "io.jwt.encode_sign", + Description: "Encodes and optionally signs a JSON Web Token. 
Inputs are taken as objects, not encoded strings (see `io.jwt.encode_sign_raw`).", + Decl: types.NewFunction( + types.Args( + types.Named("headers", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JWS Protected Header"), + types.Named("payload", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JWS Payload"), + types.Named("key", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JSON Web Key (RFC7517)"), + ), + types.Named("output", types.S).Description("signed JWT"), + ), + Categories: tokenSign, + Nondeterministic: true, +} + +/** + * Time + */ + +// Marked non-deterministic because it relies on time directly. +var NowNanos = &Builtin{ + Name: "time.now_ns", + Description: "Returns the current time since epoch in nanoseconds.", + Decl: types.NewFunction( + nil, + types.Named("now", types.N).Description("nanoseconds since epoch"), + ), + Nondeterministic: true, +} + +var ParseNanos = &Builtin{ + Name: "time.parse_ns", + Description: "Returns the time in nanoseconds parsed from the string in the given format. `undefined` if the result would be outside the valid time range that can fit within an `int64`.", + Decl: types.NewFunction( + types.Args( + types.Named("layout", types.S).Description("format used for parsing, see the [Go `time` package documentation](https://golang.org/pkg/time/#Parse) for more details"), + types.Named("value", types.S).Description("input to parse according to `layout`"), + ), + types.Named("ns", types.N).Description("`value` in nanoseconds since epoch"), + ), +} + +var ParseRFC3339Nanos = &Builtin{ + Name: "time.parse_rfc3339_ns", + Description: "Returns the time in nanoseconds parsed from the string in RFC3339 format. `undefined` if the result would be outside the valid time range that can fit within an `int64`.", + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("input string to parse in RFC3339 format"), + ), + types.Named("ns", types.N).Description("`value` in nanoseconds since epoch"), + ), +} + +var ParseDurationNanos = &Builtin{ + Name: "time.parse_duration_ns", + Description: "Returns the duration in nanoseconds represented by a string.", + Decl: types.NewFunction( + types.Args( + types.Named("duration", types.S).Description("a duration like \"3m\"; see the [Go `time` package documentation](https://golang.org/pkg/time/#ParseDuration) for more details"), + ), + types.Named("ns", types.N).Description("the `duration` in nanoseconds"), + ), +} + +var Format = &Builtin{ + Name: "time.format", + Description: "Returns the formatted timestamp for the nanoseconds since epoch.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.NewAny( + types.N, + types.NewArray([]types.Type{types.N, types.S}, nil), + types.NewArray([]types.Type{types.N, types.S, types.S}, nil), + )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string; or a three-element array of ns, timezone string and a layout string or golang defined formatting constant (see golang supported time formats)"), + ), + types.Named("formatted timestamp", types.S).Description("the formatted timestamp represented for the nanoseconds since the epoch in the supplied timezone (or UTC)"), + ), +} + +var Date = &Builtin{ + Name: "time.date", + Description: "Returns the `[year, month, day]` for the nanoseconds since epoch.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.NewAny( + 
types.N,
+				types.NewArray([]types.Type{types.N, types.S}, nil),
+			)).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string"),
+		),
+		types.Named("date", types.NewArray([]types.Type{types.N, types.N, types.N}, nil)).Description("an array of `year`, `month` (1-12), and `day` (1-31)"),
+	),
+}
+
+var Clock = &Builtin{
+	Name: "time.clock",
+	Description: "Returns the `[hour, minute, second]` of the day for the nanoseconds since epoch.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.NewAny(
+				types.N,
+				types.NewArray([]types.Type{types.N, types.S}, nil),
+			)).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string"),
+		),
+		types.Named("output", types.NewArray([]types.Type{types.N, types.N, types.N}, nil)).
+			Description("the `hour`, `minute` (0-59), and `second` (0-59) representing the time of day for the nanoseconds since epoch in the supplied timezone (or UTC)"),
+	),
+}
+
+var Weekday = &Builtin{
+	Name: "time.weekday",
+	Description: "Returns the day of the week (Monday, Tuesday, ...) for the nanoseconds since epoch.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.NewAny(
+				types.N,
+				types.NewArray([]types.Type{types.N, types.S}, nil),
+			)).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string"),
+		),
+		types.Named("day", types.S).Description("the weekday represented by `ns` nanoseconds since the epoch in the supplied timezone (or UTC)"),
+	),
+}
+
+var AddDate = &Builtin{
+	Name: "time.add_date",
+	Description: "Returns the nanoseconds since epoch after adding years, months and days to nanoseconds. Month and day values outside their usual ranges after the operation will be normalized - for example, October 32 would become November 1. `undefined` if the result would be outside the valid time range that can fit within an `int64`.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("ns", types.N).Description("nanoseconds since the epoch"),
+			types.Named("years", types.N).Description("number of years to add"),
+			types.Named("months", types.N).Description("number of months to add"),
+			types.Named("days", types.N).Description("number of days to add"),
+		),
+		types.Named("output", types.N).Description("nanoseconds since the epoch representing the input time, with years, months and days added"),
+	),
+}
+
+var Diff = &Builtin{
+	Name: "time.diff",
+	Description: "Returns the difference between two unix timestamps in nanoseconds (with optional timezone strings).",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("ns1", types.NewAny(
+				types.N,
+				types.NewArray([]types.Type{types.N, types.S}, nil),
+			)).Description("nanoseconds since the epoch; or a two-element array of the nanoseconds, and a timezone string"),
+			types.Named("ns2", types.NewAny(
+				types.N,
+				types.NewArray([]types.Type{types.N, types.S}, nil),
+			)).Description("nanoseconds since the epoch; or a two-element array of the nanoseconds, and a timezone string"),
+		),
+		types.Named("output", types.NewArray([]types.Type{types.N, types.N, types.N, types.N, types.N, types.N}, nil)).Description("difference between `ns1` and `ns2` (in their supplied timezones, if supplied, or UTC) as array of numbers: `[years, months, days, hours, minutes, seconds]`"),
+	),
+}
+
+/**
+ * Crypto.
+ */ + +var CryptoX509ParseCertificates = &Builtin{ + Name: "crypto.x509.parse_certificates", + Description: `Returns zero or more certificates from the given encoded string containing +DER certificate data. + +If the input is empty, the function will return null. The input string should be a list of one or more +concatenated PEM blocks. The whole input of concatenated PEM blocks can optionally be Base64 encoded.`, + Decl: types.NewFunction( + types.Args( + types.Named("certs", types.S).Description("base64 encoded DER or PEM data containing one or more certificates or a PEM string of one or more certificates"), + ), + types.Named("output", types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A)))).Description("parsed X.509 certificates represented as objects"), + ), +} + +var CryptoX509ParseAndVerifyCertificates = &Builtin{ + Name: "crypto.x509.parse_and_verify_certificates", + Description: `Returns one or more certificates from the given string containing PEM +or base64 encoded DER certificates after verifying the supplied certificates form a complete +certificate chain back to a trusted root. + +The first certificate is treated as the root and the last is treated as the leaf, +with all others being treated as intermediates.`, + Decl: types.NewFunction( + types.Args( + types.Named("certs", types.S).Description("base64 encoded DER or PEM data containing two or more certificates where the first is a root CA, the last is a leaf certificate, and all others are intermediate CAs"), + ), + types.Named("output", types.NewArray([]types.Type{ + types.B, + types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), + }, nil)).Description("array of `[valid, certs]`: if the input certificate chain could be verified then `valid` is `true` and `certs` is an array of X.509 certificates represented as objects; if the input certificate chain could not be verified then `valid` is `false` and `certs` is `[]`"), + ), +} + +var CryptoX509ParseAndVerifyCertificatesWithOptions = &Builtin{ + Name: "crypto.x509.parse_and_verify_certificates_with_options", + Description: `Returns one or more certificates from the given string containing PEM +or base64 encoded DER certificates after verifying the supplied certificates form a complete +certificate chain back to a trusted root. A config option passed as the second argument can +be used to configure the validation options used. + +The first certificate is treated as the root and the last is treated as the leaf, +with all others being treated as intermediates.`, + + Decl: types.NewFunction( + types.Args( + types.Named("certs", types.S).Description("base64 encoded DER or PEM data containing two or more certificates where the first is a root CA, the last is a leaf certificate, and all others are intermediate CAs"), + types.Named("options", types.NewObject( + nil, + types.NewDynamicProperty(types.S, types.A), + )).Description("object containing extra configs to verify the validity of certificates. `options` object supports four fields which maps to same fields in [x509.VerifyOptions struct](https://pkg.go.dev/crypto/x509#VerifyOptions). `DNSName`, `CurrentTime`: Nanoseconds since the Unix Epoch as a number, `MaxConstraintComparisons` and `KeyUsages`. 
`KeyUsages` is list and can have possible values as in: `\"KeyUsageAny\"`, `\"KeyUsageServerAuth\"`, `\"KeyUsageClientAuth\"`, `\"KeyUsageCodeSigning\"`, `\"KeyUsageEmailProtection\"`, `\"KeyUsageIPSECEndSystem\"`, `\"KeyUsageIPSECTunnel\"`, `\"KeyUsageIPSECUser\"`, `\"KeyUsageTimeStamping\"`, `\"KeyUsageOCSPSigning\"`, `\"KeyUsageMicrosoftServerGatedCrypto\"`, `\"KeyUsageNetscapeServerGatedCrypto\"`, `\"KeyUsageMicrosoftCommercialCodeSigning\"`, `\"KeyUsageMicrosoftKernelCodeSigning\"` "), + ), + types.Named("output", types.NewArray([]types.Type{ + types.B, + types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), + }, nil)).Description("array of `[valid, certs]`: if the input certificate chain could be verified then `valid` is `true` and `certs` is an array of X.509 certificates represented as objects; if the input certificate chain could not be verified then `valid` is `false` and `certs` is `[]`"), + ), +} + +var CryptoX509ParseCertificateRequest = &Builtin{ + Name: "crypto.x509.parse_certificate_request", + Description: "Returns a PKCS #10 certificate signing request from the given PEM-encoded PKCS#10 certificate signing request.", + Decl: types.NewFunction( + types.Args( + types.Named("csr", types.S).Description("base64 string containing either a PEM encoded or DER CSR or a string containing a PEM CSR"), + ), + types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("X.509 CSR represented as an object"), + ), +} + +var CryptoX509ParseKeyPair = &Builtin{ + Name: "crypto.x509.parse_keypair", + Description: "Returns a valid key pair", + Decl: types.NewFunction( + types.Args( + types.Named("cert", types.S).Description("string containing PEM or base64 encoded DER certificates"), + types.Named("pem", types.S).Description("string containing PEM or base64 encoded DER keys"), + ), + types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("if key pair is valid, returns the tls.certificate(https://pkg.go.dev/crypto/tls#Certificate) as an object. If the key pair is invalid, nil and an error are returned."), + ), +} +var CryptoX509ParseRSAPrivateKey = &Builtin{ + Name: "crypto.x509.parse_rsa_private_key", + Description: "Returns a JWK for signing a JWT from the given PEM-encoded RSA private key.", + Decl: types.NewFunction( + types.Args( + types.Named("pem", types.S).Description("base64 string containing a PEM encoded RSA private key"), + ), + types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JWK as an object"), + ), +} + +var CryptoParsePrivateKeys = &Builtin{ + Name: "crypto.parse_private_keys", + Description: `Returns zero or more private keys from the given encoded string containing DER certificate data. + +If the input is empty, the function will return null. The input string should be a list of one or more concatenated PEM blocks. The whole input of concatenated PEM blocks can optionally be Base64 encoded.`, + Decl: types.NewFunction( + types.Args( + types.Named("keys", types.S).Description("PEM encoded data containing one or more private keys as concatenated blocks. 
Optionally Base64 encoded."),
+		),
+		types.Named("output", types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A)))).Description("parsed private keys represented as objects"),
+	),
+}
+
+var CryptoMd5 = &Builtin{
+	Name: "crypto.md5",
+	Description: "Returns a string representing the input string hashed with the MD5 function.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.S).Description("input string"),
+		),
+		types.Named("y", types.S).Description("MD5-hash of `x`"),
+	),
+}
+
+var CryptoSha1 = &Builtin{
+	Name: "crypto.sha1",
+	Description: "Returns a string representing the input string hashed with the SHA1 function.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.S).Description("input string"),
+		),
+		types.Named("y", types.S).Description("SHA1-hash of `x`"),
+	),
+}
+
+var CryptoSha256 = &Builtin{
+	Name: "crypto.sha256",
+	Description: "Returns a string representing the input string hashed with the SHA256 function.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.S).Description("input string"),
+		),
+		types.Named("y", types.S).Description("SHA256-hash of `x`"),
+	),
+}
+
+var CryptoHmacMd5 = &Builtin{
+	Name: "crypto.hmac.md5",
+	Description: "Returns a string representing the MD5 HMAC of the input message using the input key.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.S).Description("input string"),
+			types.Named("key", types.S).Description("key to use"),
+		),
+		types.Named("y", types.S).Description("MD5-HMAC of `x`"),
+	),
+}
+
+var CryptoHmacSha1 = &Builtin{
+	Name: "crypto.hmac.sha1",
+	Description: "Returns a string representing the SHA1 HMAC of the input message using the input key.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.S).Description("input string"),
+			types.Named("key", types.S).Description("key to use"),
+		),
+		types.Named("y", types.S).Description("SHA1-HMAC of `x`"),
+	),
+}
+
+var CryptoHmacSha256 = &Builtin{
+	Name: "crypto.hmac.sha256",
+	Description: "Returns a string representing the SHA256 HMAC of the input message using the input key.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.S).Description("input string"),
+			types.Named("key", types.S).Description("key to use"),
+		),
+		types.Named("y", types.S).Description("SHA256-HMAC of `x`"),
+	),
+}
+
+var CryptoHmacSha512 = &Builtin{
+	Name: "crypto.hmac.sha512",
+	Description: "Returns a string representing the SHA512 HMAC of the input message using the input key.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.S).Description("input string"),
+			types.Named("key", types.S).Description("key to use"),
+		),
+		types.Named("y", types.S).Description("SHA512-HMAC of `x`"),
+	),
+}
+
+var CryptoHmacEqual = &Builtin{
+	Name: "crypto.hmac.equal",
+	Description: "Returns a boolean representing the result of comparing two MACs for equality without leaking timing information.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("mac1", types.S).Description("mac1 to compare"),
+			types.Named("mac2", types.S).Description("mac2 to compare"),
+		),
+		types.Named("result", types.B).Description("`true` if the MACs are equal, `false` otherwise"),
+	),
+}
+
+/**
+ * Graphs.
+ */
+var graphs = category("graph")
+
+var WalkBuiltin = &Builtin{
+	Name: "walk",
+	Relation: true,
+	Description: "Generates `[path, value]` tuples for all nested documents of `x` (recursively).
Queries can use `walk` to traverse documents nested under `x`.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A).Description("value to walk"), + ), + types.Named("output", types.NewArray( + []types.Type{ + types.NewArray(nil, types.A), + types.A, + }, + nil, + )).Description("pairs of `path` and `value`: `path` is an array representing the pointer to `value` in `x`. If `path` is assigned a wildcard (`_`), the `walk` function will skip path creation entirely for faster evaluation."), + ), + Categories: graphs, +} + +var ReachableBuiltin = &Builtin{ + Name: "graph.reachable", + Description: "Computes the set of reachable nodes in the graph from a set of starting nodes.", + Decl: types.NewFunction( + types.Args( + types.Named("graph", types.NewObject( + nil, + types.NewDynamicProperty( + types.A, + types.NewAny( + types.SetOfAny, + types.NewArray(nil, types.A)), + )), + ).Description("object containing a set or array of neighboring vertices"), + types.Named("initial", types.NewAny(types.SetOfAny, types.NewArray(nil, types.A))).Description("set or array of root vertices"), + ), + types.Named("output", types.SetOfAny).Description("set of vertices reachable from the `initial` vertices in the directed `graph`"), + ), +} + +var ReachablePathsBuiltin = &Builtin{ + Name: "graph.reachable_paths", + Description: "Computes the set of reachable paths in the graph from a set of starting nodes.", + Decl: types.NewFunction( + types.Args( + types.Named("graph", types.NewObject( + nil, + types.NewDynamicProperty( + types.A, + types.NewAny( + types.SetOfAny, + types.NewArray(nil, types.A)), + )), + ).Description("object containing a set or array of root vertices"), + types.Named("initial", types.NewAny(types.SetOfAny, types.NewArray(nil, types.A))).Description("initial paths"), // TODO(sr): copied. is that correct? 
+		),
+		types.Named("output", types.NewSet(types.NewArray(nil, types.A))).Description("paths reachable from the `initial` vertices in the directed `graph`"),
+	),
+}
+
+/**
+ * Type
+ */
+var typesCat = category("types")
+
+var IsNumber = &Builtin{
+	Name: "is_number",
+	Description: "Returns `true` if the input value is a number.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.A).Description("input value"),
+		),
+		types.Named("result", types.B).Description("`true` if `x` is a number, `false` otherwise."),
+	),
+	Categories: typesCat,
+}
+
+var IsString = &Builtin{
+	Name: "is_string",
+	Description: "Returns `true` if the input value is a string.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.A).Description("input value"),
+		),
+		types.Named("result", types.B).Description("`true` if `x` is a string, `false` otherwise."),
+	),
+	Categories: typesCat,
+}
+
+var IsBoolean = &Builtin{
+	Name: "is_boolean",
+	Description: "Returns `true` if the input value is a boolean.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.A).Description("input value"),
+		),
+		types.Named("result", types.B).Description("`true` if `x` is a boolean, `false` otherwise."),
+	),
+	Categories: typesCat,
+}
+
+var IsArray = &Builtin{
+	Name: "is_array",
+	Description: "Returns `true` if the input value is an array.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.A).Description("input value"),
+		),
+		types.Named("result", types.B).Description("`true` if `x` is an array, `false` otherwise."),
+	),
+	Categories: typesCat,
+}
+
+var IsSet = &Builtin{
+	Name: "is_set",
+	Description: "Returns `true` if the input value is a set.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.A).Description("input value"),
+		),
+		types.Named("result", types.B).Description("`true` if `x` is a set, `false` otherwise."),
+	),
+	Categories: typesCat,
+}
+
+var IsObject = &Builtin{
+	Name: "is_object",
+	Description: "Returns `true` if the input value is an object.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.A).Description("input value"),
+		),
+		types.Named("result", types.B).Description("`true` if `x` is an object, `false` otherwise."),
+	),
+	Categories: typesCat,
+}
+
+var IsNull = &Builtin{
+	Name: "is_null",
+	Description: "Returns `true` if the input value is null.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.A).Description("input value"),
+		),
+		types.Named("result", types.B).Description("`true` if `x` is null, `false` otherwise."),
+	),
+	Categories: typesCat,
+}
+
+/**
+ * Type Name
+ */
+
+// TypeNameBuiltin returns the type of the input.
+var TypeNameBuiltin = &Builtin{
+	Name: "type_name",
+	Description: "Returns the type of its input value.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.A).Description("input value"),
+		),
+		types.Named("type", types.S).Description(`one of "null", "boolean", "number", "string", "array", "object", "set"`),
+	),
+	Categories: typesCat,
+}
+
+/**
+ * HTTP Request
+ */
+
+// Marked non-deterministic because HTTP request results can be non-deterministic.
+var HTTPSend = &Builtin{
+	Name: "http.send",
+	Description: "Returns an HTTP response to the given HTTP request.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("request", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).
+				Description("the HTTP request object"),
+		),
+		types.Named("response", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).
+ Description("the HTTP response object"), + ), + Nondeterministic: true, +} + +/** + * GraphQL + */ + +// GraphQLParse returns a pair of AST objects from parsing/validation. +var GraphQLParse = &Builtin{ + Name: "graphql.parse", + Description: "Returns AST objects for a given GraphQL query and schema after validating the query against the schema. Returns undefined if errors were encountered during parsing or validation. The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.", + Decl: types.NewFunction( + types.Args( + types.Named("query", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). + Description("the GraphQL query"), + types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). + Description("the GraphQL schema"), + ), + types.Named("output", types.NewArray([]types.Type{ + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + }, nil)).Description("`output` is of the form `[query_ast, schema_ast]`. If the GraphQL query is valid given the provided schema, then `query_ast` and `schema_ast` are objects describing the ASTs for the query and schema."), + ), +} + +// GraphQLParseAndVerify returns a boolean and a pair of AST object from parsing/validation. +var GraphQLParseAndVerify = &Builtin{ + Name: "graphql.parse_and_verify", + Description: "Returns a boolean indicating success or failure alongside the parsed ASTs for a given GraphQL query and schema after validating the query against the schema. The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.", + Decl: types.NewFunction( + types.Args( + types.Named("query", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). + Description("the GraphQL query"), + types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). + Description("the GraphQL schema"), + ), + types.Named("output", types.NewArray([]types.Type{ + types.B, + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + }, nil)).Description(" `output` is of the form `[valid, query_ast, schema_ast]`. If the query is valid given the provided schema, then `valid` is `true`, and `query_ast` and `schema_ast` are objects describing the ASTs for the GraphQL query and schema. Otherwise, `valid` is `false` and `query_ast` and `schema_ast` are `{}`."), + ), +} + +// GraphQLParseQuery parses the input GraphQL query and returns a JSON +// representation of its AST. +var GraphQLParseQuery = &Builtin{ + Name: "graphql.parse_query", + Description: "Returns an AST object for a GraphQL query.", + Decl: types.NewFunction( + types.Args( + types.Named("query", types.S).Description("GraphQL query string"), + ), + types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("AST object for the GraphQL query."), + ), +} + +// GraphQLParseSchema parses the input GraphQL schema and returns a JSON +// representation of its AST. 
+var GraphQLParseSchema = &Builtin{
+	Name: "graphql.parse_schema",
+	Description: "Returns an AST object for a GraphQL schema.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("schema", types.S).Description("GraphQL schema string"),
+		),
+		types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("AST object for the GraphQL schema."),
+	),
+}
+
+// GraphQLIsValid returns true if a GraphQL query is valid with a given
+// schema, and returns false for all other inputs.
+var GraphQLIsValid = &Builtin{
+	Name: "graphql.is_valid",
+	Description: "Checks that a GraphQL query is valid against a given schema. The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("query", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))).
+				Description("the GraphQL query"),
+			types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))).
+				Description("the GraphQL schema"),
+		),
+		types.Named("output", types.B).Description("`true` if the query is valid under the given schema. `false` otherwise."),
+	),
+}
+
+// GraphQLSchemaIsValid returns true if the input is valid GraphQL schema,
+// and returns false for all other inputs.
+var GraphQLSchemaIsValid = &Builtin{
+	Name: "graphql.schema_is_valid",
+	Description: "Checks that the input is a valid GraphQL schema. The schema can be either a GraphQL string or an AST object from the other GraphQL builtin functions.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))).
+				Description("the schema to verify"),
+		),
+		types.Named("output", types.B).Description("`true` if the schema is a valid GraphQL schema. `false` otherwise."),
+	),
+}
+
+/**
+ * JSON Schema
+ */
+
+// JSONSchemaVerify returns empty string if the input is valid JSON schema
+// and returns error string for all other inputs.
+var JSONSchemaVerify = &Builtin{
+	Name: "json.verify_schema",
+	Description: "Checks that the input is a valid JSON schema object. The schema can be either a JSON string or a JSON object.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))).
+				Description("the schema to verify"),
+		),
+		types.Named("output", types.NewArray([]types.Type{
+			types.B,
+			types.NewAny(types.S, types.Null{}),
+		}, nil)).
+			Description("`output` is of the form `[valid, error]`. If the schema is valid, then `valid` is `true`, and `error` is `null`. Otherwise, `valid` is `false` and `error` is a string describing the error."),
+	),
+	Categories: objectCat,
+}
+
+// JSONMatchSchema returns empty array if the document matches the JSON schema,
+// and returns non-empty array with error objects otherwise.
+var JSONMatchSchema = &Builtin{
+	Name: "json.match_schema",
+	Description: "Checks that the document matches the JSON schema.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("document", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))).
+				Description("document to verify by schema"),
+			types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))).
+ Description("schema to verify document by"), + ), + types.Named("output", types.NewArray([]types.Type{ + types.B, + types.NewArray( + nil, types.NewObject( + []*types.StaticProperty{ + {Key: "error", Value: types.S}, + {Key: "type", Value: types.S}, + {Key: "field", Value: types.S}, + {Key: "desc", Value: types.S}, + }, + nil, + ), + ), + }, nil)). + Description("`output` is of the form `[match, errors]`. If the document is valid given the schema, then `match` is `true`, and `errors` is an empty array. Otherwise, `match` is `false` and `errors` is an array of objects describing the error(s)."), + ), + Categories: objectCat, +} + +/** + * Cloud Provider Helper Functions + */ +var providersAWSCat = category("providers.aws") + +var ProvidersAWSSignReqObj = &Builtin{ + Name: "providers.aws.sign_req", + Description: "Signs an HTTP request object for Amazon Web Services. Currently implements [AWS Signature Version 4 request signing](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) by the `Authorization` header method.", + Decl: types.NewFunction( + types.Args( + types.Named("request", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))). + Description("HTTP request object"), + types.Named("aws_config", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))). + Description("AWS configuration object"), + types.Named("time_ns", types.N).Description("nanoseconds since the epoch"), + ), + types.Named("signed_request", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))). + Description("HTTP request object with `Authorization` header"), + ), + Categories: providersAWSCat, +} + +/** + * Rego + */ + +var RegoParseModule = &Builtin{ + Name: "rego.parse_module", + Description: "Parses the input Rego string and returns an object representation of the AST.", + Decl: types.NewFunction( + types.Args( + types.Named("filename", types.S).Description("file name to attach to AST nodes' locations"), + types.Named("rego", types.S).Description("Rego module"), + ), + types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))). + Description("AST object for the Rego module"), + ), +} + +var RegoMetadataChain = &Builtin{ + Name: "rego.metadata.chain", + Description: `Returns the chain of metadata for the active rule. +Ordered starting at the active rule, going outward to the most distant node in its package ancestry. +A chain entry is a JSON document with two members: "path", an array representing the path of the node; and "annotations", a JSON document containing the annotations declared for the node. +The first entry in the chain always points to the active rule, even if it has no declared annotations (in which case the "annotations" member is not present).`, + Decl: types.NewFunction( + types.Args(), + types.Named("chain", types.NewArray(nil, types.A)).Description("each array entry represents a node in the path ancestry (chain) of the active rule that also has declared annotations"), + ), +} + +// RegoMetadataRule returns the metadata for the active rule +var RegoMetadataRule = &Builtin{ + Name: "rego.metadata.rule", + Description: "Returns annotations declared for the active rule and using the _rule_ scope.", + Decl: types.NewFunction( + types.Args(), + types.Named("output", types.A).Description("\"rule\" scope annotations for this rule; empty object if no annotations exist"), + ), +} + +/** + * OPA + */ + +// Marked non-deterministic because of unpredictable config/environment-dependent results. 
+var OPARuntime = &Builtin{
+	Name:        "opa.runtime",
+	Description: "Returns an object that describes the runtime environment where OPA is deployed.",
+	Decl: types.NewFunction(
+		nil,
+		types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).
+			Description("includes a `config` key if OPA was started with a configuration file, an `env` key containing the environment variables that the OPA process was started with, and `version` and `commit` keys containing the version and build commit of OPA."),
+	),
+	Nondeterministic: true,
+}
+
+/**
+ * Trace
+ */
+var tracing = category("tracing")
+
+var Trace = &Builtin{
+	Name:        "trace",
+	Description: "Emits `note` as a `Note` event in the query explanation. Query explanations show the exact expressions evaluated by OPA during policy execution. For example, `trace(\"Hello There!\")` includes `Note \"Hello There!\"` in the query explanation. To include variables in the message, use `sprintf`. For example, `person := \"Bob\"; trace(sprintf(\"Hello There! %v\", [person]))` will emit `Note \"Hello There! Bob\"` inside of the explanation.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("note", types.S).Description("the note to include"),
+		),
+		types.Named("result", types.B).Description("always `true`"),
+	),
+	Categories: tracing,
+}
+
+/**
+ * Glob
+ */
+
+var GlobMatch = &Builtin{
+	Name:        "glob.match",
+	Description: "Parses and matches strings against the glob notation. Not to be confused with `regex.globs_match`.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("pattern", types.S).Description("glob pattern"),
+			types.Named("delimiters", types.NewAny(
+				types.NewArray(nil, types.S),
+				types.NewNull(),
+			)).Description("glob pattern delimiters, e.g. `[\".\", \":\"]`, defaults to `[\".\"]` if unset. If `delimiters` is `null`, matching is performed without any delimiters."),
+			types.Named("match", types.S).Description("string to match against `pattern`"),
+		),
+		types.Named("result", types.B).Description("`true` if `match` matches `pattern`, where `pattern` is segmented by `delimiters`"),
+	),
+}
+
+var GlobQuoteMeta = &Builtin{
+	Name:        "glob.quote_meta",
+	Description: "Returns a string which represents a version of the pattern where all asterisks have been escaped.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("pattern", types.S).Description("glob pattern"),
+		),
+		types.Named("output", types.S).Description("the escaped string of `pattern`"),
+	),
+	// TODO(sr): example for this was: Calling ``glob.quote_meta("*.github.com", output)`` returns ``\\*.github.com`` as ``output``.
+}
+
+/**
+ * Networking
+ */
+
+var NetCIDRIntersects = &Builtin{
+	Name:        "net.cidr_intersects",
+	Description: "Checks if a CIDR intersects with another CIDR (e.g. `192.168.0.0/16` overlaps with `192.168.1.0/24`). Supports both IPv4 and IPv6 notations.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("cidr1", types.S).Description("first CIDR"),
+			types.Named("cidr2", types.S).Description("second CIDR"),
+		),
+		types.Named("result", types.B).Description("`true` if `cidr1` intersects with `cidr2`"),
+	),
+}
+
+var NetCIDRExpand = &Builtin{
+	Name:        "net.cidr_expand",
+	Description: "Expands CIDR to set of hosts (e.g., `net.cidr_expand(\"192.168.0.0/30\")` generates 4 hosts: `{\"192.168.0.0\", \"192.168.0.1\", \"192.168.0.2\", \"192.168.0.3\"}`).",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("cidr", types.S).Description("CIDR to expand"),
+		),
+		types.Named("hosts", types.SetOfStr).Description("set of IP addresses the CIDR `cidr` expands to"),
+	),
+}
+
+var NetCIDRContains = &Builtin{
+	Name:        "net.cidr_contains",
+	Description: "Checks if a CIDR or IP is contained within another CIDR. `output` is `true` if `cidr_or_ip` (e.g. `127.0.0.64/26` or `127.0.0.1`) is contained within `cidr` (e.g. `127.0.0.1/24`) and `false` otherwise. Supports both IPv4 and IPv6 notations.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("cidr", types.S).Description("CIDR to check against"),
+			types.Named("cidr_or_ip", types.S).Description("CIDR or IP to check"),
+		),
+		types.Named("result", types.B).Description("`true` if `cidr_or_ip` is contained within `cidr`"),
+	),
+}
+
+var NetCIDRContainsMatches = &Builtin{
+	Name: "net.cidr_contains_matches",
+	Description: "Checks if collections of CIDRs or IPs are contained within another collection of CIDRs and returns the matches. " +
+		"This function is similar to `net.cidr_contains` except it allows callers to pass collections of CIDRs or IPs as arguments and returns the matches (as opposed to a boolean result indicating a match between two CIDRs/IPs).",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("cidrs", netCidrContainsMatchesOperandType).Description("CIDRs to check against"),
+			types.Named("cidrs_or_ips", netCidrContainsMatchesOperandType).Description("CIDRs or IPs to check"),
+		),
+		types.Named("output", types.NewSet(types.NewArray([]types.Type{types.A, types.A}, nil))).Description("tuples identifying matches where `cidrs_or_ips` are contained within `cidrs`"),
+	),
+}
+
+var NetCIDRMerge = &Builtin{
+	Name: "net.cidr_merge",
+	Description: "Merges IP addresses and subnets into the smallest possible list of CIDRs (e.g., `net.cidr_merge([\"192.0.128.0/24\", \"192.0.129.0/24\"])` generates `{\"192.0.128.0/23\"}`. " +
+		`This function merges adjacent subnets where possible, removes subnets contained within others, and also removes any duplicates.
+Supports both IPv4 and IPv6 notations. IPv6 inputs need a prefix length (e.g.
"/128").`, + Decl: types.NewFunction( + types.Args( + types.Named("addrs", types.NewAny( + types.NewArray(nil, types.NewAny(types.S)), + types.SetOfStr, + )).Description("CIDRs or IP addresses"), + ), + types.Named("output", types.SetOfStr).Description("smallest possible set of CIDRs obtained after merging the provided list of IP addresses and subnets in `addrs`"), + ), +} + +var NetCIDRIsValid = &Builtin{ + Name: "net.cidr_is_valid", + Description: "Parses an IPv4/IPv6 CIDR and returns a boolean indicating if the provided CIDR is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("cidr", types.S).Description("CIDR to validate"), + ), + types.Named("result", types.B).Description("`true` if `cidr` is a valid CIDR"), + ), +} + +var netCidrContainsMatchesOperandType = types.NewAny( + types.S, + types.NewArray(nil, types.NewAny( + types.S, + types.NewArray(nil, types.A), + )), + types.NewSet(types.NewAny( + types.S, + types.NewArray(nil, types.A), + )), + types.NewObject(nil, types.NewDynamicProperty( + types.S, + types.NewAny( + types.S, + types.NewArray(nil, types.A), + ), + )), +) + +// Marked non-deterministic because DNS resolution results can be non-deterministic. +var NetLookupIPAddr = &Builtin{ + Name: "net.lookup_ip_addr", + Description: "Returns the set of IP addresses (both v4 and v6) that the passed-in `name` resolves to using the standard name resolution mechanisms available.", + Decl: types.NewFunction( + types.Args( + types.Named("name", types.S).Description("domain name to resolve"), + ), + types.Named("addrs", types.SetOfStr).Description("IP addresses (v4 and v6) that `name` resolves to"), + ), + Nondeterministic: true, +} + +/** + * Semantic Versions + */ + +var SemVerIsValid = &Builtin{ + Name: "semver.is_valid", + Description: "Validates that the input is a valid SemVer string.", + Decl: types.NewFunction( + types.Args( + types.Named("vsn", types.A).Description("input to validate"), + ), + types.Named("result", types.B).Description("`true` if `vsn` is a valid SemVer; `false` otherwise"), + ), +} + +var SemVerCompare = &Builtin{ + Name: "semver.compare", + Description: "Compares valid SemVer formatted version strings.", + Decl: types.NewFunction( + types.Args( + types.Named("a", types.S).Description("first version string"), + types.Named("b", types.S).Description("second version string"), + ), + types.Named("result", types.N).Description("`-1` if `a < b`; `1` if `a > b`; `0` if `a == b`"), + ), +} + +/** + * Printing + */ + +// Print is a special built-in function that writes zero or more operands +// to a message buffer. The caller controls how the buffer is displayed. The +// operands may be of any type. Furthermore, unlike other built-in functions, +// undefined operands DO NOT cause the print() function to fail during +// evaluation. +var Print = &Builtin{ + Name: "print", + Decl: types.NewVariadicFunction(nil, types.A, nil), +} + +// InternalPrint represents the internal implementation of the print() function. +// The compiler rewrites print() calls to refer to the internal implementation. +var InternalPrint = &Builtin{ + Name: "internal.print", + Decl: types.NewFunction([]types.Type{types.NewArray(nil, types.SetOfAny)}, nil), +} + +var InternalTestCase = &Builtin{ + Name: "internal.test_case", + Decl: types.NewFunction([]types.Type{types.NewArray(nil, types.A)}, nil), +} + +/** + * Deprecated built-ins. + */ + +// SetDiff has been replaced by the minus built-in. 
+var SetDiff = &Builtin{
+	Name: "set_diff",
+	Decl: types.NewFunction(
+		types.Args(
+			types.SetOfAny,
+			types.SetOfAny,
+		),
+		types.SetOfAny,
+	),
+	deprecated: true,
+}
+
+// NetCIDROverlap has been replaced by the `net.cidr_contains` built-in.
+var NetCIDROverlap = &Builtin{
+	Name: "net.cidr_overlap",
+	Decl: types.NewFunction(
+		types.Args(
+			types.S,
+			types.S,
+		),
+		types.B,
+	),
+	deprecated: true,
+}
+
+// CastArray checks the underlying type of the input. If it is an array or set,
+// an array containing the values is returned. If it is not an array, an error
+// is thrown.
+var CastArray = &Builtin{
+	Name: "cast_array",
+	Decl: types.NewFunction(
+		types.Args(types.A),
+		types.NewArray(nil, types.A),
+	),
+	deprecated: true,
+}
+
+// CastSet checks the underlying type of the input.
+// If it is a set, the set is returned.
+// If it is an array, the array is returned in set form (all duplicates removed).
+// If neither, an error is thrown.
+var CastSet = &Builtin{
+	Name: "cast_set",
+	Decl: types.NewFunction(
+		types.Args(types.A),
+		types.SetOfAny,
+	),
+	deprecated: true,
+}
+
+// CastString returns the input if it is a string; if not, returns an error.
+// For formatting variables, see sprintf.
+var CastString = &Builtin{
+	Name: "cast_string",
+	Decl: types.NewFunction(
+		types.Args(types.A),
+		types.S,
+	),
+	deprecated: true,
+}
+
+// CastBoolean returns the input if it is a boolean; if not, returns an error.
+var CastBoolean = &Builtin{
+	Name: "cast_boolean",
+	Decl: types.NewFunction(
+		types.Args(types.A),
+		types.B,
+	),
+	deprecated: true,
+}
+
+// CastNull returns null if the input is null; if not, returns an error.
+var CastNull = &Builtin{
+	Name: "cast_null",
+	Decl: types.NewFunction(
+		types.Args(types.A),
+		types.NewNull(),
+	),
+	deprecated: true,
+}
+
+// CastObject returns the given object if it is an object; throws an error otherwise.
+var CastObject = &Builtin{
+	Name: "cast_object",
+	Decl: types.NewFunction(
+		types.Args(types.A),
+		types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+	),
+	deprecated: true,
+}
+
+// RegexMatchDeprecated declares `re_match` which has been deprecated. Use `regex.match` instead.
+var RegexMatchDeprecated = &Builtin{
+	Name: "re_match",
+	Decl: types.NewFunction(
+		types.Args(
+			types.S,
+			types.S,
+		),
+		types.B,
+	),
+	deprecated: true,
+}
+
+// All takes a collection and returns true if all of the items
+// are true. A collection of length 0 returns true.
+var All = &Builtin{
+	Name: "all",
+	Decl: types.NewFunction(
+		types.Args(
+			types.NewAny(
+				types.SetOfAny,
+				types.NewArray(nil, types.A),
+			),
+		),
+		types.B,
+	),
+	deprecated: true,
+}
+
+// Any takes a collection and returns true if any of the items
+// is true. A collection of length 0 returns false.
+var Any = &Builtin{
+	Name: "any",
+	Decl: types.NewFunction(
+		types.Args(
+			types.NewAny(
+				types.SetOfAny,
+				types.NewArray(nil, types.A),
+			),
+		),
+		types.B,
+	),
+	deprecated: true,
+}
+
+// Builtin represents a built-in function supported by OPA. Every built-in
+// function is uniquely identified by a name.
+type Builtin struct {
+	Name        string `json:"name"`                  // Unique name of built-in function, e.g., <name>(arg1,arg2,...,argN)
+	Description string `json:"description,omitempty"` // Description of what the built-in function does.
+
+	// Categories of the built-in function. Omitted for namespaced
+	// built-ins, i.e. "array.concat" is taken to be of the "array" category.
+	// "minus", for example, is part of two categories: numbers and sets.
+	// (NOTE(sr): aspirational)
+	Categories []string `json:"categories,omitempty"`
+
+	Decl             *types.Function `json:"decl"`               // Built-in function type declaration.
+	Infix            string          `json:"infix,omitempty"`    // Unique name of infix operator. Default should be unset.
+	Relation         bool            `json:"relation,omitempty"` // Indicates if the built-in acts as a relation.
+	deprecated       bool            // Indicates if the built-in has been deprecated.
+	Nondeterministic bool            `json:"nondeterministic,omitempty"` // Indicates if the built-in returns non-deterministic results.
+}
+
+// category is a helper for specifying a Builtin's Categories.
+func category(cs ...string) []string {
+	return cs
+}
+
+// Minimal returns a shallow copy of b with the description, categories, and
+// named arguments stripped out.
+func (b *Builtin) Minimal() *Builtin {
+	cpy := *b
+	fargs := b.Decl.FuncArgs()
+	if fargs.Variadic != nil {
+		cpy.Decl = types.NewVariadicFunction(fargs.Args, fargs.Variadic, b.Decl.Result())
+	} else {
+		cpy.Decl = types.NewFunction(fargs.Args, b.Decl.Result())
+	}
+	cpy.Categories = nil
+	cpy.Description = ""
+	return &cpy
+}
+
+// IsDeprecated returns true if the Builtin function is deprecated and will be removed in a future release.
+func (b *Builtin) IsDeprecated() bool {
+	return b.deprecated
+}
+
+// IsNondeterministic returns true if the Builtin function returns non-deterministic results.
+func (b *Builtin) IsNondeterministic() bool {
+	return b.Nondeterministic
+}
+
+// Expr creates a new expression for the built-in with the given operands.
+func (b *Builtin) Expr(operands ...*Term) *Expr {
+	ts := make([]*Term, len(operands)+1)
+	ts[0] = NewTerm(b.Ref())
+	for i := range operands {
+		ts[i+1] = operands[i]
+	}
+	return &Expr{
+		Terms: ts,
+	}
+}
+
+// Call creates a new term for the built-in with the given operands.
+func (b *Builtin) Call(operands ...*Term) *Term {
+	call := make(Call, len(operands)+1)
+	call[0] = NewTerm(b.Ref())
+	for i := range operands {
+		call[i+1] = operands[i]
+	}
+	return NewTerm(call)
+}
+
+// Ref returns a Ref that refers to the built-in function.
+func (b *Builtin) Ref() Ref {
+	parts := strings.Split(b.Name, ".")
+	ref := make(Ref, len(parts))
+	ref[0] = VarTerm(parts[0])
+	for i := 1; i < len(parts); i++ {
+		ref[i] = StringTerm(parts[i])
+	}
+	return ref
+}
+
+// IsTargetPos returns true if a variable in the i-th position will be bound by
+// evaluating the call expression.
+func (b *Builtin) IsTargetPos(i int) bool {
+	return b.Decl.Arity() == i
+}
+
+func init() {
+	BuiltinMap = map[string]*Builtin{}
+	for _, b := range &DefaultBuiltins {
+		RegisterBuiltin(b)
+	}
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/capabilities.go b/vendor/github.com/open-policy-agent/opa/v1/ast/capabilities.go
new file mode 100644
index 00000000000..7744a4ca26f
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/capabilities.go
@@ -0,0 +1,266 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+	"bytes"
+	_ "embed"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"slices"
+	"sort"
+	"strings"
+
+	"github.com/open-policy-agent/opa/internal/semver"
+	"github.com/open-policy-agent/opa/internal/wasm/sdk/opa/capabilities"
+	caps "github.com/open-policy-agent/opa/v1/capabilities"
+	"github.com/open-policy-agent/opa/v1/util"
+)
+
+// VersionIndex contains an index from built-in function name, language feature,
+// and future rego keyword to version number. During the build, this is used to
+// create an index of the minimum version required for the built-in/feature/keyword.
+type VersionIndex struct {
+	Builtins map[string]semver.Version `json:"builtins"`
+	Features map[string]semver.Version `json:"features"`
+	Keywords map[string]semver.Version `json:"keywords"`
+}
+
+// NOTE(tsandall): this file is generated by internal/cmd/genversionindex/main.go
+// and run as part of go:generate. We generate the version index as part of the
+// build process because it's relatively expensive to build (it takes ~500ms on
+// my machine) and never changes.
+//
+//go:embed version_index.json
+var versionIndexBs []byte
+
+var minVersionIndex = func() VersionIndex {
+	var vi VersionIndex
+	err := json.Unmarshal(versionIndexBs, &vi)
+	if err != nil {
+		panic(err)
+	}
+	return vi
+}()
+
+// In the compiler, we use this to check that we're OK working with ref heads.
+// If this isn't present, we'll fail. This is to ensure that older versions of
+// OPA can work with policies that we're compiling -- if they don't know ref
+// heads, they wouldn't be able to parse them.
+const FeatureRefHeadStringPrefixes = "rule_head_ref_string_prefixes"
+const FeatureRefHeads = "rule_head_refs"
+const FeatureRegoV1 = "rego_v1"
+const FeatureRegoV1Import = "rego_v1_import"
+
+// Capabilities defines a structure containing data that describes the capabilities
+// or features supported by a particular version of OPA.
+type Capabilities struct {
+	Builtins        []*Builtin       `json:"builtins,omitempty"`
+	FutureKeywords  []string         `json:"future_keywords,omitempty"`
+	WasmABIVersions []WasmABIVersion `json:"wasm_abi_versions,omitempty"`
+
+	// Features is a bit of a mixed bag for checking that an older version of OPA
+	// is able to do what needs to be done.
+	// TODO(sr): find better words ^^
+	Features []string `json:"features,omitempty"`
+
+	// allow_net is an array of hostnames or IP addresses that an OPA instance is
+	// allowed to connect to.
+	// If omitted, ANY host can be connected to. If empty, NO host can be connected to.
+	// As of now, this only controls fetching remote refs for using JSON Schemas in
+	// the type checker.
+	// TODO(sr): support ports to further restrict connection peers
+	// TODO(sr): support restricting `http.send` using the same mechanism (see https://github.com/open-policy-agent/opa/issues/3665)
+	AllowNet []string `json:"allow_net,omitempty"`
+}
+
+// WasmABIVersion captures the Wasm ABI version. Its `Minor` version indicates
+// backwards-compatible changes.
+type WasmABIVersion struct { + Version int `json:"version"` + Minor int `json:"minor_version"` +} + +type CapabilitiesOptions struct { + regoVersion RegoVersion +} + +func newCapabilitiesOptions(opts []CapabilitiesOption) CapabilitiesOptions { + co := CapabilitiesOptions{} + for _, opt := range opts { + opt(&co) + } + return co +} + +type CapabilitiesOption func(*CapabilitiesOptions) + +func CapabilitiesRegoVersion(regoVersion RegoVersion) CapabilitiesOption { + return func(o *CapabilitiesOptions) { + o.regoVersion = regoVersion + } +} + +// CapabilitiesForThisVersion returns the capabilities of this version of OPA. +func CapabilitiesForThisVersion(opts ...CapabilitiesOption) *Capabilities { + co := newCapabilitiesOptions(opts) + + f := &Capabilities{} + + for _, vers := range capabilities.ABIVersions() { + f.WasmABIVersions = append(f.WasmABIVersions, WasmABIVersion{Version: vers[0], Minor: vers[1]}) + } + + f.Builtins = make([]*Builtin, len(Builtins)) + copy(f.Builtins, Builtins) + + slices.SortFunc(f.Builtins, func(a, b *Builtin) int { + return strings.Compare(a.Name, b.Name) + }) + + switch co.regoVersion { + case RegoV0, RegoV0CompatV1: + for kw := range allFutureKeywords { + f.FutureKeywords = append(f.FutureKeywords, kw) + } + + f.Features = []string{ + FeatureRefHeadStringPrefixes, + FeatureRefHeads, + FeatureRegoV1Import, + FeatureRegoV1, // Included in v0 capabilities to allow v1 bundles in --v0-compatible mode + } + default: + for kw := range futureKeywords { + f.FutureKeywords = append(f.FutureKeywords, kw) + } + + f.Features = []string{ + FeatureRegoV1, + } + } + + sort.Strings(f.FutureKeywords) + sort.Strings(f.Features) + + return f +} + +// LoadCapabilitiesJSON loads a JSON serialized capabilities structure from the reader r. +func LoadCapabilitiesJSON(r io.Reader) (*Capabilities, error) { + d := util.NewJSONDecoder(r) + var c Capabilities + return &c, d.Decode(&c) +} + +// LoadCapabilitiesVersion loads a JSON serialized capabilities structure from the specific version. +func LoadCapabilitiesVersion(version string) (*Capabilities, error) { + cvs, err := LoadCapabilitiesVersions() + if err != nil { + return nil, err + } + + for _, cv := range cvs { + if cv == version { + cont, err := caps.FS.ReadFile(cv + ".json") + if err != nil { + return nil, err + } + + return LoadCapabilitiesJSON(bytes.NewReader(cont)) + } + + } + return nil, fmt.Errorf("no capabilities version found %v", version) +} + +// LoadCapabilitiesFile loads a JSON serialized capabilities structure from a file. +func LoadCapabilitiesFile(file string) (*Capabilities, error) { + fd, err := os.Open(file) + if err != nil { + return nil, err + } + defer fd.Close() + return LoadCapabilitiesJSON(fd) +} + +// LoadCapabilitiesVersions loads all capabilities versions +func LoadCapabilitiesVersions() ([]string, error) { + ents, err := caps.FS.ReadDir(".") + if err != nil { + return nil, err + } + + capabilitiesVersions := make([]string, 0, len(ents)) + for _, ent := range ents { + capabilitiesVersions = append(capabilitiesVersions, strings.Replace(ent.Name(), ".json", "", 1)) + } + return capabilitiesVersions, nil +} + +// MinimumCompatibleVersion returns the minimum compatible OPA version based on +// the built-ins, features, and keywords in c. 
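+//
+// A minimal sketch of typical use from Go (assumes this package is imported
+// as ast; the version returned depends on the capabilities content):
+//
+//	caps := ast.CapabilitiesForThisVersion()
+//	if v, ok := caps.MinimumCompatibleVersion(); ok {
+//		fmt.Println("minimum compatible OPA version:", v)
+//	}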
+func (c *Capabilities) MinimumCompatibleVersion() (string, bool) { + + var maxVersion semver.Version + + // this is the oldest OPA release that includes capabilities + if err := maxVersion.Set("0.17.0"); err != nil { + panic("unreachable") + } + + for _, bi := range c.Builtins { + v, ok := minVersionIndex.Builtins[bi.Name] + if !ok { + return "", false + } + if v.Compare(maxVersion) > 0 { + maxVersion = v + } + } + + for _, kw := range c.FutureKeywords { + v, ok := minVersionIndex.Keywords[kw] + if !ok { + return "", false + } + if v.Compare(maxVersion) > 0 { + maxVersion = v + } + } + + for _, feat := range c.Features { + v, ok := minVersionIndex.Features[feat] + if !ok { + return "", false + } + if v.Compare(maxVersion) > 0 { + maxVersion = v + } + } + + return maxVersion.String(), true +} + +func (c *Capabilities) ContainsFeature(feature string) bool { + return slices.Contains(c.Features, feature) +} + +// addBuiltinSorted inserts a built-in into c in sorted order. An existing built-in with the same name +// will be overwritten. +func (c *Capabilities) addBuiltinSorted(bi *Builtin) { + i := sort.Search(len(c.Builtins), func(x int) bool { + return c.Builtins[x].Name >= bi.Name + }) + if i < len(c.Builtins) && bi.Name == c.Builtins[i].Name { + c.Builtins[i] = bi + return + } + c.Builtins = append(c.Builtins, nil) + copy(c.Builtins[i+1:], c.Builtins[i:]) + c.Builtins[i] = bi +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/check.go b/vendor/github.com/open-policy-agent/opa/v1/ast/check.go new file mode 100644 index 00000000000..ecfb320649a --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/check.go @@ -0,0 +1,1327 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "fmt" + "sort" + "strings" + + "github.com/open-policy-agent/opa/v1/types" + "github.com/open-policy-agent/opa/v1/util" +) + +type varRewriter func(Ref) Ref + +// exprChecker defines the interface for executing type checking on a single +// expression. The exprChecker must update the provided TypeEnv with inferred +// types of vars. +type exprChecker func(*TypeEnv, *Expr) *Error + +// typeChecker implements type checking on queries and rules. Errors are +// accumulated on the typeChecker so that a single run can report multiple +// issues. +type typeChecker struct { + builtins map[string]*Builtin + required *Capabilities + errs Errors + exprCheckers map[string]exprChecker + varRewriter varRewriter + ss *SchemaSet + allowNet []string + input types.Type + allowUndefinedFuncs bool + schemaTypes map[string]types.Type +} + +// newTypeChecker returns a new typeChecker object that has no errors. +func newTypeChecker() *typeChecker { + return &typeChecker{ + exprCheckers: map[string]exprChecker{ + "eq": checkExprEq, + }, + } +} + +func (tc *typeChecker) newEnv(exist *TypeEnv) *TypeEnv { + if exist != nil { + return exist.wrap() + } + env := newTypeEnv(tc.copy) + if tc.input != nil { + env.tree.Put(InputRootRef, tc.input) + } + return env +} + +func (tc *typeChecker) copy() *typeChecker { + return newTypeChecker(). + WithVarRewriter(tc.varRewriter). + WithSchemaSet(tc.ss). + WithSchemaTypes(tc.schemaTypes). + WithAllowNet(tc.allowNet). + WithInputType(tc.input). + WithAllowUndefinedFunctionCalls(tc.allowUndefinedFuncs). + WithBuiltins(tc.builtins). 
+		WithRequiredCapabilities(tc.required)
+}
+
+func (tc *typeChecker) WithRequiredCapabilities(c *Capabilities) *typeChecker {
+	tc.required = c
+	return tc
+}
+
+func (tc *typeChecker) WithBuiltins(builtins map[string]*Builtin) *typeChecker {
+	tc.builtins = builtins
+	return tc
+}
+
+func (tc *typeChecker) WithSchemaSet(ss *SchemaSet) *typeChecker {
+	tc.ss = ss
+	return tc
+}
+
+func (tc *typeChecker) WithSchemaTypes(schemaTypes map[string]types.Type) *typeChecker {
+	tc.schemaTypes = schemaTypes
+	return tc
+}
+
+func (tc *typeChecker) WithAllowNet(hosts []string) *typeChecker {
+	tc.allowNet = hosts
+	return tc
+}
+
+func (tc *typeChecker) WithVarRewriter(f varRewriter) *typeChecker {
+	tc.varRewriter = f
+	return tc
+}
+
+func (tc *typeChecker) WithInputType(tpe types.Type) *typeChecker {
+	tc.input = tpe
+	return tc
+}
+
+// WithAllowUndefinedFunctionCalls sets the type checker to allow references to undefined functions.
+// Additionally, the 'CheckUndefinedFuncs' and 'CheckSafetyRuleBodies' compiler stages are skipped.
+func (tc *typeChecker) WithAllowUndefinedFunctionCalls(allow bool) *typeChecker {
+	tc.allowUndefinedFuncs = allow
+	return tc
+}
+
+// Env returns a type environment for the specified built-ins with any other
+// global types configured on the checker. In practice, this is the default
+// environment that other statements will be checked against.
+func (tc *typeChecker) Env(builtins map[string]*Builtin) *TypeEnv {
+	env := tc.newEnv(nil)
+	for _, bi := range builtins {
+		env.tree.Put(bi.Ref(), bi.Decl)
+	}
+	return env
+}
+
+// CheckBody runs type checking on the body and returns a TypeEnv if no errors
+// are found. The resulting TypeEnv wraps the provided one. The resulting
+// TypeEnv will be able to resolve types of vars contained in the body.
+func (tc *typeChecker) CheckBody(env *TypeEnv, body Body) (*TypeEnv, Errors) {
+
+	errors := []*Error{}
+	env = tc.newEnv(env)
+	vis := newRefChecker(env, tc.varRewriter)
+
+	WalkExprs(body, func(expr *Expr) bool {
+
+		closureErrs := tc.checkClosures(env, expr)
+		for _, err := range closureErrs {
+			errors = append(errors, err)
+		}
+
+		hasClosureErrors := len(closureErrs) > 0
+
+		// reset errors from previous iteration
+		vis.errs = nil
+		NewGenericVisitor(vis.Visit).Walk(expr)
+		for _, err := range vis.errs {
+			errors = append(errors, err)
+		}
+
+		hasRefErrors := len(vis.errs) > 0
+
+		if err := tc.checkExpr(env, expr); err != nil {
+			// Suppress this error if a more actionable one has occurred. In
+			// this case, if an error occurred in a ref or closure contained in
+			// this expression, and the error is due to a nil type, then it's
+			// likely to be the result of the more specific error.
+			skip := (hasClosureErrors || hasRefErrors) && causedByNilType(err)
+			if !skip {
+				errors = append(errors, err)
+			}
+		}
+		return true
+	})
+
+	tc.err(errors)
+	return env, errors
+}
+
+// CheckTypes runs type checking on the rules and returns a TypeEnv if no errors
+// are found. The resulting TypeEnv wraps the provided one. The resulting
+// TypeEnv will be able to resolve types of refs that refer to rules.
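+//
+// A package-internal sketch of typical use (the compiler passes the rules in
+// topologically sorted order; the names here are illustrative):
+//
+//	tc := newTypeChecker().WithSchemaSet(ss)
+//	env, errs := tc.CheckTypes(nil, sortedRules, annotationSet)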
+func (tc *typeChecker) CheckTypes(env *TypeEnv, sorted []util.T, as *AnnotationSet) (*TypeEnv, Errors) { + env = tc.newEnv(env) + for _, s := range sorted { + tc.checkRule(env, as, s.(*Rule)) + } + tc.errs.Sort() + return env, tc.errs +} + +func (tc *typeChecker) checkClosures(env *TypeEnv, expr *Expr) Errors { + var result Errors + WalkClosures(expr, func(x interface{}) bool { + switch x := x.(type) { + case *ArrayComprehension: + _, errs := tc.copy().CheckBody(env, x.Body) + if len(errs) > 0 { + result = errs + return true + } + case *SetComprehension: + _, errs := tc.copy().CheckBody(env, x.Body) + if len(errs) > 0 { + result = errs + return true + } + case *ObjectComprehension: + _, errs := tc.copy().CheckBody(env, x.Body) + if len(errs) > 0 { + result = errs + return true + } + } + return false + }) + return result +} + +func (tc *typeChecker) getSchemaType(schemaAnnot *SchemaAnnotation, rule *Rule) (types.Type, *Error) { + if tc.schemaTypes == nil { + tc.schemaTypes = make(map[string]types.Type) + } + + if refType, exists := tc.schemaTypes[schemaAnnot.Schema.String()]; exists { + return refType, nil + } + + refType, err := processAnnotation(tc.ss, schemaAnnot, rule, tc.allowNet) + if err != nil { + return nil, err + } + + if refType == nil { + return nil, nil + } + + tc.schemaTypes[schemaAnnot.Schema.String()] = refType + return refType, nil + +} + +func (tc *typeChecker) checkRule(env *TypeEnv, as *AnnotationSet, rule *Rule) { + + env = env.wrap() + + schemaAnnots := getRuleAnnotation(as, rule) + for _, schemaAnnot := range schemaAnnots { + refType, err := tc.getSchemaType(schemaAnnot, rule) + if err != nil { + tc.err([]*Error{err}) + continue + } + + ref := schemaAnnot.Path + // if we do not have a ref or a reftype, we should not evaluate this rule. + if ref == nil || refType == nil { + continue + } + + prefixRef, t := getPrefix(env, ref) + if t == nil || len(prefixRef) == len(ref) { + env.tree.Put(ref, refType) + } else { + newType, err := override(ref[len(prefixRef):], t, refType, rule) + if err != nil { + tc.err([]*Error{err}) + continue + } + env.tree.Put(prefixRef, newType) + } + } + + cpy, err := tc.CheckBody(env, rule.Body) + env = env.next + path := rule.Ref() + + if len(err) > 0 { + // if the rule/function contains an error, add it to the type env so + // that expressions that refer to this rule/function do not encounter + // type errors. + env.tree.Put(path, types.A) + return + } + + var tpe types.Type + + if len(rule.Head.Args) > 0 { + // If args are not referred to in body, infer as any. + WalkVars(rule.Head.Args, func(v Var) bool { + if cpy.GetByValue(v) == nil { + cpy.tree.PutOne(v, types.A) + } + return false + }) + + // Construct function type. + args := make([]types.Type, len(rule.Head.Args)) + for i := range len(rule.Head.Args) { + args[i] = cpy.GetByValue(rule.Head.Args[i].Value) + } + + f := types.NewFunction(args, cpy.Get(rule.Head.Value)) + + tpe = f + } else { + switch rule.Head.RuleKind() { + case SingleValue: + typeV := cpy.GetByValue(rule.Head.Value.Value) + if !path.IsGround() { + // e.g. 
store object[string: whatever] at data.p.q.r, not data.p.q.r[x] or data.p.q.r[x].y[z] + objPath := path.DynamicSuffix() + path = path.GroundPrefix() + + var err error + tpe, err = nestedObject(cpy, objPath, typeV) + if err != nil { + tc.err([]*Error{NewError(TypeErr, rule.Head.Location, err.Error())}) //nolint:govet + tpe = nil + } + } else if typeV != nil { + tpe = typeV + } + case MultiValue: + typeK := cpy.GetByValue(rule.Head.Key.Value) + if typeK != nil { + tpe = types.NewSet(typeK) + } + } + } + + if tpe != nil { + env.tree.Insert(path, tpe, env) + } +} + +// nestedObject creates a nested structure of object types, where each term on path corresponds to a level in the +// nesting. Each term in the path only contributes to the dynamic portion of its corresponding object. +func nestedObject(env *TypeEnv, path Ref, tpe types.Type) (types.Type, error) { + if len(path) == 0 { + return tpe, nil + } + + k := path[0] + typeV, err := nestedObject(env, path[1:], tpe) + if err != nil { + return nil, err + } + if typeV == nil { + return nil, nil + } + + var dynamicProperty *types.DynamicProperty + typeK := env.GetByValue(k.Value) + if typeK == nil { + return nil, nil + } + dynamicProperty = types.NewDynamicProperty(typeK, typeV) + + return types.NewObject(nil, dynamicProperty), nil +} + +func (tc *typeChecker) checkExpr(env *TypeEnv, expr *Expr) *Error { + if err := tc.checkExprWith(env, expr, 0); err != nil { + return err + } + if !expr.IsCall() { + return nil + } + + operator := expr.Operator().String() + + // If the type checker wasn't provided with a required capabilities + // structure then just skip. In some cases, type checking might be run + // without the need to record what builtins are required. + if tc.required != nil && tc.builtins != nil { + if bi, ok := tc.builtins[operator]; ok { + tc.required.addBuiltinSorted(bi) + } + } + + checker := tc.exprCheckers[operator] + if checker != nil { + return checker(env, expr) + } + + return tc.checkExprBuiltin(env, expr) +} + +func (tc *typeChecker) checkExprBuiltin(env *TypeEnv, expr *Expr) *Error { + + args := expr.Operands() + pre := getArgTypes(env, args) + + // NOTE(tsandall): undefined functions will have been caught earlier in the + // compiler. We check for undefined functions before the safety check so + // that references to non-existent functions result in undefined function + // errors as opposed to unsafe var errors. + // + // We cannot run type checking before the safety check because part of the + // type checker relies on reordering (in particular for references to local + // vars). 
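+	// Resolve the operator's declared function type from the type environment;
+	// a nil result means the function is unknown, which is tolerated only when
+	// allowUndefinedFuncs is set (see below).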
+ name := expr.Operator() + tpe := env.GetByRef(name) + + if tpe == nil { + if tc.allowUndefinedFuncs { + return nil + } + return NewError(TypeErr, expr.Location, "undefined function %v", name) + } + + // check if the expression refers to a function that contains an error + _, ok := tpe.(types.Any) + if ok { + return nil + } + + ftpe, ok := tpe.(*types.Function) + if !ok { + return NewError(TypeErr, expr.Location, "undefined function %v", name) + } + + fargs := ftpe.FuncArgs() + namedFargs := ftpe.NamedFuncArgs() + + if ftpe.Result() != nil { + fargs.Args = append(fargs.Args, ftpe.Result()) + namedFargs.Args = append(namedFargs.Args, ftpe.NamedResult()) + } + + if len(args) > len(fargs.Args) && fargs.Variadic == nil { + return newArgError(expr.Location, name, "too many arguments", pre, namedFargs) + } + + if len(args) < len(ftpe.FuncArgs().Args) { + return newArgError(expr.Location, name, "too few arguments", pre, namedFargs) + } + + for i := range args { + if !unify1(env, args[i], fargs.Arg(i), false) { + post := make([]types.Type, len(args)) + for i := range args { + post[i] = env.GetByValue(args[i].Value) + } + return newArgError(expr.Location, name, "invalid argument(s)", post, namedFargs) + } + } + + return nil +} + +func checkExprEq(env *TypeEnv, expr *Expr) *Error { + + pre := getArgTypes(env, expr.Operands()) + + if len(pre) < Equality.Decl.Arity() { + return newArgError(expr.Location, expr.Operator(), "too few arguments", pre, Equality.Decl.FuncArgs()) + } + + if Equality.Decl.Arity() < len(pre) { + return newArgError(expr.Location, expr.Operator(), "too many arguments", pre, Equality.Decl.FuncArgs()) + } + + a, b := expr.Operand(0), expr.Operand(1) + typeA, typeB := env.GetByValue(a.Value), env.GetByValue(b.Value) + + if !unify2(env, a, typeA, b, typeB) { + err := NewError(TypeErr, expr.Location, "match error") + err.Details = &UnificationErrDetail{ + Left: typeA, + Right: typeB, + } + return err + } + + return nil +} + +func (tc *typeChecker) checkExprWith(env *TypeEnv, expr *Expr, i int) *Error { + if i == len(expr.With) { + return nil + } + + target, value := expr.With[i].Target, expr.With[i].Value + targetType, valueType := env.GetByValue(target.Value), env.GetByValue(value.Value) + + if t, ok := targetType.(*types.Function); ok { // built-in function replacement + switch v := valueType.(type) { + case *types.Function: // ...by function + if !unifies(targetType, valueType) { + return newArgError(expr.With[i].Loc(), target.Value.(Ref), "arity mismatch", v.FuncArgs().Args, t.NamedFuncArgs()) + } + default: // ... 
by value, nothing to check + } + } + + return tc.checkExprWith(env, expr, i+1) +} + +func unify2(env *TypeEnv, a *Term, typeA types.Type, b *Term, typeB types.Type) bool { + + nilA := types.Nil(typeA) + nilB := types.Nil(typeB) + + if nilA && !nilB { + return unify1(env, a, typeB, false) + } else if nilB && !nilA { + return unify1(env, b, typeA, false) + } else if !nilA && !nilB { + return unifies(typeA, typeB) + } + + switch a.Value.(type) { + case *Array: + return unify2Array(env, a, b) + case *object: + return unify2Object(env, a, b) + case Var: + switch b.Value.(type) { + case Var: + return unify1(env, a, types.A, false) && unify1(env, b, env.GetByValue(a.Value), false) + case *Array: + return unify2Array(env, b, a) + case *object: + return unify2Object(env, b, a) + } + } + + return false +} + +func unify2Array(env *TypeEnv, a *Term, b *Term) bool { + arr := a.Value.(*Array) + switch bv := b.Value.(type) { + case *Array: + if arr.Len() == bv.Len() { + for i := range arr.Len() { + if !unify2(env, arr.Elem(i), env.GetByValue(arr.Elem(i).Value), bv.Elem(i), env.GetByValue(bv.Elem(i).Value)) { + return false + } + } + return true + } + case Var: + return unify1(env, a, types.A, false) && unify1(env, b, env.GetByValue(a.Value), false) + } + return false +} + +func unify2Object(env *TypeEnv, a *Term, b *Term) bool { + obj := a.Value.(Object) + switch bv := b.Value.(type) { + case *object: + cv := obj.Intersect(bv) + if obj.Len() == bv.Len() && bv.Len() == len(cv) { + for i := range cv { + if !unify2(env, cv[i][1], env.GetByValue(cv[i][1].Value), cv[i][2], env.GetByValue(cv[i][2].Value)) { + return false + } + } + return true + } + case Var: + return unify1(env, a, types.A, false) && unify1(env, b, env.GetByValue(a.Value), false) + } + return false +} + +func unify1(env *TypeEnv, term *Term, tpe types.Type, union bool) bool { + switch v := term.Value.(type) { + case *Array: + switch tpe := tpe.(type) { + case *types.Array: + return unify1Array(env, v, tpe, union) + case types.Any: + if types.Compare(tpe, types.A) == 0 { + for i := range v.Len() { + unify1(env, v.Elem(i), types.A, true) + } + return true + } + unifies := false + for i := range tpe { + unifies = unify1(env, term, tpe[i], true) || unifies + } + return unifies + } + return false + case *object: + switch tpe := tpe.(type) { + case *types.Object: + return unify1Object(env, v, tpe, union) + case types.Any: + if types.Compare(tpe, types.A) == 0 { + v.Foreach(func(key, value *Term) { + unify1(env, key, types.A, true) + unify1(env, value, types.A, true) + }) + return true + } + unifies := false + for i := range tpe { + unifies = unify1(env, term, tpe[i], true) || unifies + } + return unifies + } + return false + case Set: + switch tpe := tpe.(type) { + case *types.Set: + return unify1Set(env, v, tpe, union) + case types.Any: + if types.Compare(tpe, types.A) == 0 { + v.Foreach(func(elem *Term) { + unify1(env, elem, types.A, true) + }) + return true + } + unifies := false + for i := range tpe { + unifies = unify1(env, term, tpe[i], true) || unifies + } + return unifies + } + return false + case Ref, *ArrayComprehension, *ObjectComprehension, *SetComprehension: + return unifies(env.GetByValue(v), tpe) + case Var: + if !union { + if exist := env.GetByValue(v); exist != nil { + return unifies(exist, tpe) + } + env.tree.PutOne(term.Value, tpe) + } else { + env.tree.PutOne(term.Value, types.Or(env.GetByValue(v), tpe)) + } + return true + default: + if !IsConstant(v) { + panic("unreachable") + } + return unifies(env.GetByValue(term.Value), 
tpe) + } +} + +func unify1Array(env *TypeEnv, val *Array, tpe *types.Array, union bool) bool { + if val.Len() != tpe.Len() && tpe.Dynamic() == nil { + return false + } + for i := range val.Len() { + if !unify1(env, val.Elem(i), tpe.Select(i), union) { + return false + } + } + return true +} + +func unify1Object(env *TypeEnv, val Object, tpe *types.Object, union bool) bool { + if val.Len() != len(tpe.Keys()) && tpe.DynamicValue() == nil { + return false + } + stop := val.Until(func(k, v *Term) bool { + if IsConstant(k.Value) { + if child := selectConstant(tpe, k); child != nil { + if !unify1(env, v, child, union) { + return true + } + } else { + return true + } + } else { + // Inferring type of value under dynamic key would involve unioning + // with all property values of tpe whose keys unify. For now, type + // these values as Any. We can investigate stricter inference in + // the future. + unify1(env, v, types.A, union) + } + return false + }) + return !stop +} + +func unify1Set(env *TypeEnv, val Set, tpe *types.Set, union bool) bool { + of := types.Values(tpe) + return !val.Until(func(elem *Term) bool { + return !unify1(env, elem, of, union) + }) +} + +func (tc *typeChecker) err(errors []*Error) { + tc.errs = append(tc.errs, errors...) +} + +type refChecker struct { + env *TypeEnv + errs Errors + varRewriter varRewriter +} + +func rewriteVarsNop(node Ref) Ref { + return node +} + +func newRefChecker(env *TypeEnv, f varRewriter) *refChecker { + if f == nil { + f = rewriteVarsNop + } + + return &refChecker{ + env: env, + errs: nil, + varRewriter: f, + } +} + +func (rc *refChecker) Visit(x interface{}) bool { + switch x := x.(type) { + case *ArrayComprehension, *ObjectComprehension, *SetComprehension: + return true + case *Expr: + switch terms := x.Terms.(type) { + case []*Term: + for i := 1; i < len(terms); i++ { + NewGenericVisitor(rc.Visit).Walk(terms[i]) + } + return true + case *Term: + NewGenericVisitor(rc.Visit).Walk(terms) + return true + } + case Ref: + if err := rc.checkApply(rc.env, x); err != nil { + rc.errs = append(rc.errs, err) + return true + } + if err := rc.checkRef(rc.env, rc.env.tree, x, 0); err != nil { + rc.errs = append(rc.errs, err) + } + } + return false +} + +func (rc *refChecker) checkApply(curr *TypeEnv, ref Ref) *Error { + if tpe, ok := curr.GetByRef(ref).(*types.Function); ok { + // NOTE(sr): We don't support first-class functions, except for `with`. + return newRefErrUnsupported(ref[0].Location, rc.varRewriter(ref), len(ref)-1, tpe) + } + + return nil +} + +func (rc *refChecker) checkRef(curr *TypeEnv, node *typeTreeNode, ref Ref, idx int) *Error { + + if idx == len(ref) { + return nil + } + + head := ref[idx] + + // NOTE(sr): as long as package statements are required, this isn't possible: + // the shortest possible rule ref is data.a.b (b is idx 2), idx 1 and 2 need to + // be strings or vars. 
+ if idx == 1 || idx == 2 { + switch head.Value.(type) { + case Var, String: // OK + default: + have := rc.env.GetByValue(head.Value) + return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, have, types.S, getOneOfForNode(node)) + } + } + + if _, ok := head.Value.(Var); ok && idx != 0 { + tpe := types.Keys(rc.env.getRefRecExtent(node)) + if exist := rc.env.GetByValue(head.Value); exist != nil { + if !unifies(tpe, exist) { + return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, tpe, getOneOfForNode(node)) + } + } else { + rc.env.tree.PutOne(head.Value, tpe) + } + } + + child := node.Child(head.Value) + if child == nil { + // NOTE(sr): idx is reset on purpose: we start over + switch { + case curr.next != nil: + next := curr.next + return rc.checkRef(next, next.tree, ref, 0) + + case RootDocumentNames.Contains(ref[0]): + if idx != 0 { + node.Children().Iter(func(_ Value, child *typeTreeNode) bool { + _ = rc.checkRef(curr, child, ref, idx+1) // ignore error + return false + }) + return nil + } + return rc.checkRefLeaf(types.A, ref, 1) + + default: + return rc.checkRefLeaf(types.A, ref, 0) + } + } + + if child.Leaf() { + return rc.checkRefLeaf(child.Value(), ref, idx+1) + } + + return rc.checkRef(curr, child, ref, idx+1) +} + +func (rc *refChecker) checkRefLeaf(tpe types.Type, ref Ref, idx int) *Error { + + if idx == len(ref) { + return nil + } + + head := ref[idx] + + keys := types.Keys(tpe) + if keys == nil { + return newRefErrUnsupported(ref[0].Location, rc.varRewriter(ref), idx-1, tpe) + } + + switch value := head.Value.(type) { + + case Var: + if exist := rc.env.GetByValue(value); exist != nil { + if !unifies(exist, keys) { + return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, keys, getOneOfForType(tpe)) + } + } else { + rc.env.tree.PutOne(value, types.Keys(tpe)) + } + + case Ref: + if exist := rc.env.Get(value); exist != nil { + if !unifies(exist, keys) { + return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, keys, getOneOfForType(tpe)) + } + } + + case *Array, Object, Set: + if !unify1(rc.env, head, keys, false) { + return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, rc.env.Get(head), keys, nil) + } + + default: + child := selectConstant(tpe, head) + if child == nil { + return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, nil, types.Keys(tpe), getOneOfForType(tpe)) + } + return rc.checkRefLeaf(child, ref, idx+1) + } + + return rc.checkRefLeaf(types.Values(tpe), ref, idx+1) +} + +func unifies(a, b types.Type) bool { + + if a == nil || b == nil { + return false + } + + anyA, ok1 := a.(types.Any) + if ok1 { + if unifiesAny(anyA, b) { + return true + } + } + + anyB, ok2 := b.(types.Any) + if ok2 { + if unifiesAny(anyB, a) { + return true + } + } + + if ok1 || ok2 { + return false + } + + switch a := a.(type) { + case types.Null: + _, ok := b.(types.Null) + return ok + case types.Boolean: + _, ok := b.(types.Boolean) + return ok + case types.Number: + _, ok := b.(types.Number) + return ok + case types.String: + _, ok := b.(types.String) + return ok + case *types.Array: + b, ok := b.(*types.Array) + if !ok { + return false + } + return unifiesArrays(a, b) + case *types.Object: + b, ok := b.(*types.Object) + if !ok { + return false + } + return unifiesObjects(a, b) + case *types.Set: + b, ok := b.(*types.Set) + if !ok { + return false + } + return unifies(types.Values(a), types.Values(b)) + case *types.Function: + // NOTE(sr): variadic functions can only be internal ones, and we've forbidden 
+ // their replacement via `with`; so we disregard variadic here + if types.Arity(a) == types.Arity(b) { + b := b.(*types.Function) + for i := range a.FuncArgs().Args { + if !unifies(a.FuncArgs().Arg(i), b.FuncArgs().Arg(i)) { + return false + } + } + return true + } + return false + default: + panic("unreachable") + } +} + +func unifiesAny(a types.Any, b types.Type) bool { + if _, ok := b.(*types.Function); ok { + return false + } + for i := range a { + if unifies(a[i], b) { + return true + } + } + return len(a) == 0 +} + +func unifiesArrays(a, b *types.Array) bool { + + if !unifiesArraysStatic(a, b) { + return false + } + + if !unifiesArraysStatic(b, a) { + return false + } + + return a.Dynamic() == nil || b.Dynamic() == nil || unifies(a.Dynamic(), b.Dynamic()) +} + +func unifiesArraysStatic(a, b *types.Array) bool { + if a.Len() != 0 { + for i := range a.Len() { + if !unifies(a.Select(i), b.Select(i)) { + return false + } + } + } + return true +} + +func unifiesObjects(a, b *types.Object) bool { + if !unifiesObjectsStatic(a, b) { + return false + } + + if !unifiesObjectsStatic(b, a) { + return false + } + + return a.DynamicValue() == nil || b.DynamicValue() == nil || unifies(a.DynamicValue(), b.DynamicValue()) +} + +func unifiesObjectsStatic(a, b *types.Object) bool { + for _, k := range a.Keys() { + if !unifies(a.Select(k), b.Select(k)) { + return false + } + } + return true +} + +// typeErrorCause defines an interface to determine the reason for a type +// error. The type error details implement this interface so that type checking +// can report more actionable errors. +type typeErrorCause interface { + nilType() bool +} + +func causedByNilType(err *Error) bool { + cause, ok := err.Details.(typeErrorCause) + if !ok { + return false + } + return cause.nilType() +} + +// ArgErrDetail represents a generic argument error. +type ArgErrDetail struct { + Have []types.Type `json:"have"` + Want types.FuncArgs `json:"want"` +} + +// Lines returns the string representation of the detail. +func (d *ArgErrDetail) Lines() []string { + lines := make([]string, 2) + lines[0] = "have: " + formatArgs(d.Have) + lines[1] = "want: " + d.Want.String() + return lines +} + +func (d *ArgErrDetail) nilType() bool { + for i := range d.Have { + if types.Nil(d.Have[i]) { + return true + } + } + return false +} + +// UnificationErrDetail describes a type mismatch error when two values are +// unified (e.g., x = [1,2,y]). +type UnificationErrDetail struct { + Left types.Type `json:"a"` + Right types.Type `json:"b"` +} + +func (a *UnificationErrDetail) nilType() bool { + return types.Nil(a.Left) || types.Nil(a.Right) +} + +// Lines returns the string representation of the detail. +func (a *UnificationErrDetail) Lines() []string { + lines := make([]string, 2) + lines[0] = fmt.Sprint("left : ", types.Sprint(a.Left)) + lines[1] = fmt.Sprint("right : ", types.Sprint(a.Right)) + return lines +} + +// RefErrUnsupportedDetail describes an undefined reference error where the +// referenced value does not support dereferencing (e.g., scalars). +type RefErrUnsupportedDetail struct { + Ref Ref `json:"ref"` // invalid ref + Pos int `json:"pos"` // invalid element + Have types.Type `json:"have"` // referenced type +} + +// Lines returns the string representation of the detail. 
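+//
+// For a ref such as `input.x.y` failing at position 1, the rendered lines
+// look roughly like (illustrative):
+//
+//	input.x.y
+//	^^^^^^^
+//	have: string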
+func (r *RefErrUnsupportedDetail) Lines() []string { + lines := []string{ + r.Ref.String(), + strings.Repeat("^", len(r.Ref[:r.Pos+1].String())), + fmt.Sprintf("have: %v", r.Have), + } + return lines +} + +// RefErrInvalidDetail describes an undefined reference error where the referenced +// value does not support the reference operand (e.g., missing object key, +// invalid key type, etc.) +type RefErrInvalidDetail struct { + Ref Ref `json:"ref"` // invalid ref + Pos int `json:"pos"` // invalid element + Have types.Type `json:"have,omitempty"` // type of invalid element (for var/ref elements) + Want types.Type `json:"want"` // allowed type (for non-object values) + OneOf []Value `json:"oneOf"` // allowed values (e.g., for object keys) +} + +// Lines returns the string representation of the detail. +func (r *RefErrInvalidDetail) Lines() []string { + lines := []string{r.Ref.String()} + offset := len(r.Ref[:r.Pos].String()) + 1 + pad := strings.Repeat(" ", offset) + lines = append(lines, pad+"^") + if r.Have != nil { + lines = append(lines, fmt.Sprintf("%shave (type): %v", pad, r.Have)) + } else { + lines = append(lines, fmt.Sprintf("%shave: %v", pad, r.Ref[r.Pos])) + } + if len(r.OneOf) > 0 { + lines = append(lines, fmt.Sprintf("%swant (one of): %v", pad, r.OneOf)) + } else { + lines = append(lines, fmt.Sprintf("%swant (type): %v", pad, r.Want)) + } + return lines +} + +func formatArgs(args []types.Type) string { + buf := make([]string, len(args)) + for i := range args { + buf[i] = types.Sprint(args[i]) + } + return "(" + strings.Join(buf, ", ") + ")" +} + +func newRefErrInvalid(loc *Location, ref Ref, idx int, have, want types.Type, oneOf []Value) *Error { + err := newRefError(loc, ref) + err.Details = &RefErrInvalidDetail{ + Ref: ref, + Pos: idx, + Have: have, + Want: want, + OneOf: oneOf, + } + return err +} + +func newRefErrUnsupported(loc *Location, ref Ref, idx int, have types.Type) *Error { + err := newRefError(loc, ref) + err.Details = &RefErrUnsupportedDetail{ + Ref: ref, + Pos: idx, + Have: have, + } + return err +} + +func newRefError(loc *Location, ref Ref) *Error { + return NewError(TypeErr, loc, "undefined ref: %v", ref) +} + +func newArgError(loc *Location, builtinName Ref, msg string, have []types.Type, want types.FuncArgs) *Error { + err := NewError(TypeErr, loc, "%v: %v", builtinName, msg) + err.Details = &ArgErrDetail{ + Have: have, + Want: want, + } + return err +} + +func getOneOfForNode(node *typeTreeNode) (result []Value) { + node.Children().Iter(func(k Value, _ *typeTreeNode) bool { + result = append(result, k) + return false + }) + + sortValueSlice(result) + return result +} + +func getOneOfForType(tpe types.Type) (result []Value) { + switch tpe := tpe.(type) { + case *types.Object: + for _, k := range tpe.Keys() { + v, err := InterfaceToValue(k) + if err != nil { + panic(err) + } + result = append(result, v) + } + + case types.Any: + for _, object := range tpe { + objRes := getOneOfForType(object) + result = append(result, objRes...) 
+ } + } + + result = removeDuplicate(result) + sortValueSlice(result) + return result +} + +func sortValueSlice(sl []Value) { + sort.Slice(sl, func(i, j int) bool { + return sl[i].Compare(sl[j]) < 0 + }) +} + +func removeDuplicate(list []Value) []Value { + seen := make(map[Value]bool) + var newResult []Value + for _, item := range list { + if !seen[item] { + newResult = append(newResult, item) + seen[item] = true + } + } + return newResult +} + +func getArgTypes(env *TypeEnv, args []*Term) []types.Type { + pre := make([]types.Type, len(args)) + for i := range args { + pre[i] = env.Get(args[i]) + } + return pre +} + +// getPrefix returns the shortest prefix of ref that exists in env +func getPrefix(env *TypeEnv, ref Ref) (Ref, types.Type) { + if len(ref) == 1 { + t := env.Get(ref) + if t != nil { + return ref, t + } + } + for i := 1; i < len(ref); i++ { + t := env.Get(ref[:i]) + if t != nil { + return ref[:i], t + } + } + return nil, nil +} + +// override takes a type t and returns a type obtained from t where the path represented by ref within it has type o (overriding the original type of that path) +func override(ref Ref, t types.Type, o types.Type, rule *Rule) (types.Type, *Error) { + var newStaticProps []*types.StaticProperty + obj, ok := t.(*types.Object) + if !ok { + newType, err := getObjectType(ref, o, rule, types.NewDynamicProperty(types.A, types.A)) + if err != nil { + return nil, err + } + return newType, nil + } + found := false + if ok { + staticProps := obj.StaticProperties() + for _, prop := range staticProps { + valueCopy := prop.Value + key, err := InterfaceToValue(prop.Key) + if err != nil { + return nil, NewError(TypeErr, rule.Location, "unexpected error in override: %s", err.Error()) + } + if len(ref) > 0 && ref[0].Value.Compare(key) == 0 { + found = true + if len(ref) == 1 { + valueCopy = o + } else { + newVal, err := override(ref[1:], valueCopy, o, rule) + if err != nil { + return nil, err + } + valueCopy = newVal + } + } + newStaticProps = append(newStaticProps, types.NewStaticProperty(prop.Key, valueCopy)) + } + } + + // ref[0] is not a top-level key in staticProps, so it must be added + if !found { + newType, err := getObjectType(ref, o, rule, obj.DynamicProperties()) + if err != nil { + return nil, err + } + newStaticProps = append(newStaticProps, newType.StaticProperties()...) 
+ } + return types.NewObject(newStaticProps, obj.DynamicProperties()), nil +} + +func getKeys(ref Ref, rule *Rule) ([]interface{}, *Error) { + keys := []interface{}{} + for _, refElem := range ref { + key, err := JSON(refElem.Value) + if err != nil { + return nil, NewError(TypeErr, rule.Location, "error getting key from value: %s", err.Error()) + } + keys = append(keys, key) + } + return keys, nil +} + +func getObjectTypeRec(keys []interface{}, o types.Type, d *types.DynamicProperty) *types.Object { + if len(keys) == 1 { + staticProps := []*types.StaticProperty{types.NewStaticProperty(keys[0], o)} + return types.NewObject(staticProps, d) + } + + staticProps := []*types.StaticProperty{types.NewStaticProperty(keys[0], getObjectTypeRec(keys[1:], o, d))} + return types.NewObject(staticProps, d) +} + +func getObjectType(ref Ref, o types.Type, rule *Rule, d *types.DynamicProperty) (*types.Object, *Error) { + keys, err := getKeys(ref, rule) + if err != nil { + return nil, err + } + return getObjectTypeRec(keys, o, d), nil +} + +func getRuleAnnotation(as *AnnotationSet, rule *Rule) (result []*SchemaAnnotation) { + + for _, x := range as.GetSubpackagesScope(rule.Module.Package.Path) { + result = append(result, x.Schemas...) + } + + if x := as.GetPackageScope(rule.Module.Package); x != nil { + result = append(result, x.Schemas...) + } + + if x := as.GetDocumentScope(rule.Ref().GroundPrefix()); x != nil { + result = append(result, x.Schemas...) + } + + for _, x := range as.GetRuleScope(rule) { + result = append(result, x.Schemas...) + } + + return result +} + +func processAnnotation(ss *SchemaSet, annot *SchemaAnnotation, rule *Rule, allowNet []string) (types.Type, *Error) { + + var schema interface{} + + if annot.Schema != nil { + if ss == nil { + return nil, nil + } + schema = ss.Get(annot.Schema) + if schema == nil { + return nil, NewError(TypeErr, rule.Location, "undefined schema: %v", annot.Schema) + } + } else if annot.Definition != nil { + schema = *annot.Definition + } + + tpe, err := loadSchema(schema, allowNet) + if err != nil { + return nil, NewError(TypeErr, rule.Location, err.Error()) //nolint:govet + } + + return tpe, nil +} + +func errAnnotationRedeclared(a *Annotations, other *Location) *Error { + return NewError(TypeErr, a.Location, "%v annotation redeclared: %v", a.Scope, other) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/compare.go b/vendor/github.com/open-policy-agent/opa/v1/ast/compare.go new file mode 100644 index 00000000000..452c6365a3a --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/compare.go @@ -0,0 +1,444 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "encoding/json" + "fmt" + "math/big" +) + +// Compare returns an integer indicating whether two AST values are less than, +// equal to, or greater than each other. +// +// If a is less than b, the return value is negative. If a is greater than b, +// the return value is positive. If a is equal to b, the return value is zero. +// +// Different types are never equal to each other. For comparison purposes, types +// are sorted as follows: +// +// nil < Null < Boolean < Number < String < Var < Ref < Array < Object < Set < +// ArrayComprehension < ObjectComprehension < SetComprehension < Expr < SomeDecl +// < With < Body < Rule < Import < Package < Module. 
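+//
+// An illustrative sketch (not part of the upstream documentation), using this
+// package's MustParseTerm helper:
+//
+//	Compare(MustParseTerm("[1, 2]"), MustParseTerm("[1, 2, 3]")) // -1: equal prefix, a is shorter
+//	Compare(MustParseTerm(`"a"`), MustParseTerm("[1]"))          // -1: String sorts before Array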
+//
+// Arrays and Refs are equal if and only if both a and b have the same length
+// and all corresponding elements are equal. If one element is not equal, the
+// return value is the same as for the first differing element. If all elements
+// are equal but a and b have different lengths, the shorter is considered less
+// than the other.
+//
+// Objects are considered equal if and only if both a and b have the same sorted
+// (key, value) pairs and are of the same length. Other comparisons are
+// consistent but not defined.
+//
+// Sets are considered equal if and only if the symmetric difference of a and b
+// is empty.
+// Other comparisons are consistent but not defined.
+func Compare(a, b interface{}) int {
+
+	if t, ok := a.(*Term); ok {
+		if t == nil {
+			a = nil
+		} else {
+			a = t.Value
+		}
+	}
+
+	if t, ok := b.(*Term); ok {
+		if t == nil {
+			b = nil
+		} else {
+			b = t.Value
+		}
+	}
+
+	if a == nil {
+		if b == nil {
+			return 0
+		}
+		return -1
+	}
+	if b == nil {
+		return 1
+	}
+
+	sortA := sortOrder(a)
+	sortB := sortOrder(b)
+
+	if sortA < sortB {
+		return -1
+	} else if sortB < sortA {
+		return 1
+	}
+
+	switch a := a.(type) {
+	case Null:
+		return 0
+	case Boolean:
+		b := b.(Boolean)
+		if a.Equal(b) {
+			return 0
+		}
+		if !a {
+			return -1
+		}
+		return 1
+	case Number:
+		if ai, err := json.Number(a).Int64(); err == nil {
+			if bi, err := json.Number(b.(Number)).Int64(); err == nil {
+				if ai == bi {
+					return 0
+				}
+				if ai < bi {
+					return -1
+				}
+				return 1
+			}
+		}
+
+		// We use big.Rat for comparing big numbers.
+		// It replaces big.Float due to the following reason:
+		// big.Float comes with a default precision of 64, and setting a
+		// larger precision results in more memory being allocated
+		// (regardless of the actual number we are parsing with SetString).
+		//
+		// Note: If we're so close to zero that big.Float says we are zero, do
+		// *not* call (big.Rat).SetString on the original string; it'll
+		// potentially take very long.
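+		//
+		// Note: the int64 fast path above already handled numbers that fit
+		// in a machine integer; everything that reaches this point (floats,
+		// big integers, high-precision decimals) is compared exactly via
+		// big.Rat below.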
+ var bigA, bigB *big.Rat + fa, ok := new(big.Float).SetString(string(a)) + if !ok { + panic("illegal value") + } + if fa.IsInt() { + if i, _ := fa.Int64(); i == 0 { + bigA = new(big.Rat).SetInt64(0) + } + } + if bigA == nil { + bigA, ok = new(big.Rat).SetString(string(a)) + if !ok { + panic("illegal value") + } + } + + fb, ok := new(big.Float).SetString(string(b.(Number))) + if !ok { + panic("illegal value") + } + if fb.IsInt() { + if i, _ := fb.Int64(); i == 0 { + bigB = new(big.Rat).SetInt64(0) + } + } + if bigB == nil { + bigB, ok = new(big.Rat).SetString(string(b.(Number))) + if !ok { + panic("illegal value") + } + } + + return bigA.Cmp(bigB) + case String: + b := b.(String) + if a.Equal(b) { + return 0 + } + if a < b { + return -1 + } + return 1 + case Var: + return VarCompare(a, b.(Var)) + case Ref: + b := b.(Ref) + return termSliceCompare(a, b) + case *Array: + b := b.(*Array) + return termSliceCompare(a.elems, b.elems) + case *lazyObj: + return Compare(a.force(), b) + case *object: + if x, ok := b.(*lazyObj); ok { + b = x.force() + } + b := b.(*object) + return a.Compare(b) + case Set: + b := b.(Set) + return a.Compare(b) + case *ArrayComprehension: + b := b.(*ArrayComprehension) + if cmp := Compare(a.Term, b.Term); cmp != 0 { + return cmp + } + return a.Body.Compare(b.Body) + case *ObjectComprehension: + b := b.(*ObjectComprehension) + if cmp := Compare(a.Key, b.Key); cmp != 0 { + return cmp + } + if cmp := Compare(a.Value, b.Value); cmp != 0 { + return cmp + } + return a.Body.Compare(b.Body) + case *SetComprehension: + b := b.(*SetComprehension) + if cmp := Compare(a.Term, b.Term); cmp != 0 { + return cmp + } + return a.Body.Compare(b.Body) + case Call: + b := b.(Call) + return termSliceCompare(a, b) + case *Expr: + b := b.(*Expr) + return a.Compare(b) + case *SomeDecl: + b := b.(*SomeDecl) + return a.Compare(b) + case *Every: + b := b.(*Every) + return a.Compare(b) + case *With: + b := b.(*With) + return a.Compare(b) + case Body: + b := b.(Body) + return a.Compare(b) + case *Head: + b := b.(*Head) + return a.Compare(b) + case *Rule: + b := b.(*Rule) + return a.Compare(b) + case Args: + b := b.(Args) + return termSliceCompare(a, b) + case *Import: + b := b.(*Import) + return a.Compare(b) + case *Package: + b := b.(*Package) + return a.Compare(b) + case *Annotations: + b := b.(*Annotations) + return a.Compare(b) + case *Module: + b := b.(*Module) + return a.Compare(b) + } + panic(fmt.Sprintf("illegal value: %T", a)) +} + +type termSlice []*Term + +func (s termSlice) Less(i, j int) bool { return Compare(s[i].Value, s[j].Value) < 0 } +func (s termSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s termSlice) Len() int { return len(s) } + +func sortOrder(x interface{}) int { + switch x.(type) { + case Null: + return 0 + case Boolean: + return 1 + case Number: + return 2 + case String: + return 3 + case Var: + return 4 + case Ref: + return 5 + case *Array: + return 6 + case Object: + return 7 + case Set: + return 8 + case *ArrayComprehension: + return 9 + case *ObjectComprehension: + return 10 + case *SetComprehension: + return 11 + case Call: + return 12 + case Args: + return 13 + case *Expr: + return 100 + case *SomeDecl: + return 101 + case *Every: + return 102 + case *With: + return 110 + case *Head: + return 120 + case Body: + return 200 + case *Rule: + return 1000 + case *Import: + return 1001 + case *Package: + return 1002 + case *Annotations: + return 1003 + case *Module: + return 10000 + } + panic(fmt.Sprintf("illegal value: %T", x)) +} + +func importsCompare(a, b 
[]*Import) int {
+	minLen := len(a)
+	if len(b) < minLen {
+		minLen = len(b)
+	}
+	for i := range minLen {
+		if cmp := a[i].Compare(b[i]); cmp != 0 {
+			return cmp
+		}
+	}
+	if len(a) < len(b) {
+		return -1
+	}
+	if len(b) < len(a) {
+		return 1
+	}
+	return 0
+}
+
+func annotationsCompare(a, b []*Annotations) int {
+	minLen := len(a)
+	if len(b) < minLen {
+		minLen = len(b)
+	}
+	for i := range minLen {
+		if cmp := a[i].Compare(b[i]); cmp != 0 {
+			return cmp
+		}
+	}
+	if len(a) < len(b) {
+		return -1
+	}
+	if len(b) < len(a) {
+		return 1
+	}
+	return 0
+}
+
+func rulesCompare(a, b []*Rule) int {
+	minLen := len(a)
+	if len(b) < minLen {
+		minLen = len(b)
+	}
+	for i := range minLen {
+		if cmp := a[i].Compare(b[i]); cmp != 0 {
+			return cmp
+		}
+	}
+	if len(a) < len(b) {
+		return -1
+	}
+	if len(b) < len(a) {
+		return 1
+	}
+	return 0
+}
+
+func termSliceCompare(a, b []*Term) int {
+	minLen := len(a)
+	if len(b) < minLen {
+		minLen = len(b)
+	}
+	for i := range minLen {
+		if cmp := Compare(a[i], b[i]); cmp != 0 {
+			return cmp
+		}
+	}
+	if len(a) < len(b) {
+		return -1
+	} else if len(b) < len(a) {
+		return 1
+	}
+	return 0
+}
+
+func withSliceCompare(a, b []*With) int {
+	minLen := len(a)
+	if len(b) < minLen {
+		minLen = len(b)
+	}
+	for i := range minLen {
+		if cmp := Compare(a[i], b[i]); cmp != 0 {
+			return cmp
+		}
+	}
+	if len(a) < len(b) {
+		return -1
+	} else if len(b) < len(a) {
+		return 1
+	}
+	return 0
+}
+
+func VarCompare(a, b Var) int {
+	if a == b {
+		return 0
+	}
+	if a < b {
+		return -1
+	}
+	return 1
+}
+
+func TermValueCompare(a, b *Term) int {
+	return a.Value.Compare(b.Value)
+}
+
+func TermValueEqual(a, b *Term) bool {
+	return ValueEqual(a.Value, b.Value)
+}
+
+func ValueEqual(a, b Value) bool {
+	// TODO(ae): why doesn't this work the same?
+	//
+	// case interface{ Equal(Value) bool }:
+	//     return v.Equal(b)
+	//
+	// When put on top, golangci-lint even flags the other cases as unreachable..
+	// but TestTopdownVirtualCache will have failing test cases when we replace
+	// the other cases with the above one.. 🤔
+	switch v := a.(type) {
+	case Null:
+		return v.Equal(b)
+	case Boolean:
+		return v.Equal(b)
+	case Number:
+		return v.Equal(b)
+	case String:
+		return v.Equal(b)
+	case Var:
+		return v.Equal(b)
+	case Ref:
+		return v.Equal(b)
+	case *Array:
+		return v.Equal(b)
+	}
+
+	return a.Compare(b) == 0
+}
+
+func RefCompare(a, b Ref) int {
+	return termSliceCompare(a, b)
+}
+
+func RefEqual(a, b Ref) bool {
+	return termSliceEqual(a, b)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/compile.go b/vendor/github.com/open-policy-agent/opa/v1/ast/compile.go
new file mode 100644
index 00000000000..13855692cd6
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/compile.go
@@ -0,0 +1,5979 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"maps"
+	"slices"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/open-policy-agent/opa/internal/debug"
+	"github.com/open-policy-agent/opa/internal/gojsonschema"
+	"github.com/open-policy-agent/opa/v1/ast/location"
+	"github.com/open-policy-agent/opa/v1/metrics"
+	"github.com/open-policy-agent/opa/v1/types"
+	"github.com/open-policy-agent/opa/v1/util"
+)
+
+// CompileErrorLimitDefault is the default number of errors a compiler will allow before
+// exiting.
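+// Zero or a negative limit disables the cap; see SetErrorLimit. An
+// illustrative sketch: NewCompiler().SetErrorLimit(0) reports every error it
+// encounters.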
+const CompileErrorLimitDefault = 10 + +var errLimitReached = NewError(CompileErr, nil, "error limit reached") + +// Compiler contains the state of a compilation process. +type Compiler struct { + + // Errors contains errors that occurred during the compilation process. + // If there are one or more errors, the compilation process is considered + // "failed". + Errors Errors + + // Modules contains the compiled modules. The compiled modules are the + // output of the compilation process. If the compilation process failed, + // there is no guarantee about the state of the modules. + Modules map[string]*Module + + // ModuleTree organizes the modules into a tree where each node is keyed by + // an element in the module's package path. E.g., given modules containing + // the following package directives: "a", "a.b", "a.c", and "a.b", the + // resulting module tree would be: + // + // root + // | + // +--- data (no modules) + // | + // +--- a (1 module) + // | + // +--- b (2 modules) + // | + // +--- c (1 module) + // + ModuleTree *ModuleTreeNode + + // RuleTree organizes rules into a tree where each node is keyed by an + // element in the rule's path. The rule path is the concatenation of the + // containing package and the stringified rule name. E.g., given the + // following module: + // + // package ex + // p[1] { true } + // p[2] { true } + // q = true + // a.b.c = 3 + // + // root + // | + // +--- data (no rules) + // | + // +--- ex (no rules) + // | + // +--- p (2 rules) + // | + // +--- q (1 rule) + // | + // +--- a + // | + // +--- b + // | + // +--- c (1 rule) + // + // Another example with general refs containing vars at arbitrary locations: + // + // package ex + // a.b[x].d { x := "c" } # R1 + // a.b.c[x] { x := "d" } # R2 + // a.b[x][y] { x := "c"; y := "d" } # R3 + // p := true # R4 + // + // root + // | + // +--- data (no rules) + // | + // +--- ex (no rules) + // | + // +--- a + // | | + // | +--- b (R1, R3) + // | | + // | +--- c (R2) + // | + // +--- p (R4) + RuleTree *TreeNode + + // Graph contains dependencies between rules. An edge (u,v) is added to the + // graph if rule 'u' refers to the virtual document defined by 'v'. + Graph *Graph + + // TypeEnv holds type information for values inferred by the compiler. + TypeEnv *TypeEnv + + // RewrittenVars is a mapping of variables that have been rewritten + // with the key being the generated name and value being the original. + RewrittenVars map[Var]Var + + // Capabilities required by the modules that were compiled. 
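+	// During compilation, the type checker records the built-in functions the
+	// modules call, and the BuildRequiredCapabilities stage adds the future
+	// keywords and language features they depend on.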
+	Required *Capabilities
+
+	localvargen                *localVarGenerator
+	moduleLoader               ModuleLoader
+	ruleIndices                *util.HasherMap[Ref, RuleIndex]
+	stages                     []stage
+	maxErrs                    int
+	sorted                     []string // list of sorted module names
+	pathExists                 func([]string) (bool, error)
+	pathConflictCheckRoots     []string
+	after                      map[string][]CompilerStageDefinition
+	metrics                    metrics.Metrics
+	capabilities               *Capabilities                 // user-supplied capabilities
+	imports                    map[string][]*Import          // saved imports from stripping
+	builtins                   map[string]*Builtin           // universe of built-in functions
+	customBuiltins             map[string]*Builtin           // user-supplied custom built-in functions (deprecated: use capabilities)
+	unsafeBuiltinsMap          map[string]struct{}           // user-supplied set of unsafe built-in functions to block (deprecated: use capabilities)
+	deprecatedBuiltinsMap      map[string]struct{}           // set of deprecated, but not removed, built-in functions
+	enablePrintStatements      bool                          // if false (the default), calls to print() are erased at compile time
+	comprehensionIndices       map[*Term]*ComprehensionIndex // comprehension key index
+	initialized                bool                          // indicates if init() has been called
+	debug                      debug.Debug                   // emits debug information produced during compilation
+	schemaSet                  *SchemaSet                    // user-supplied schemas for input and data documents
+	inputType                  types.Type                    // global input type retrieved from schema set
+	annotationSet              *AnnotationSet                // hierarchical set of annotations
+	strict                     bool                          // enforce strict compilation checks
+	keepModules                bool                          // whether to keep the unprocessed, parsed modules (below)
+	parsedModules              map[string]*Module            // parsed, but otherwise unprocessed modules, kept track of when keepModules is true
+	useTypeCheckAnnotations    bool                          // whether to provide annotated information (schemas) to the type checker
+	allowUndefinedFuncCalls    bool                          // don't error on calls to unknown functions.
+	evalMode                   CompilerEvalMode              // toggles which stages run (see CompilerEvalMode)
+	rewriteTestRulesForTracing bool                          // rewrite test rules to capture dynamic values for tracing.
+	defaultRegoVersion         RegoVersion
+}
+
+// DefaultRegoVersion returns the default Rego version the compiler applies to
+// modules that don't carry one themselves.
+func (c *Compiler) DefaultRegoVersion() RegoVersion {
+	return c.defaultRegoVersion
+}
+
+// CompilerStage defines the interface for stages in the compiler.
+type CompilerStage func(*Compiler) *Error
+
+// CompilerEvalMode allows toggling certain stages that are only
+// needed for certain modes. Concretely, only "topdown" mode will
+// have the compiler build comprehension and rule indices.
+type CompilerEvalMode int
+
+const (
+	// EvalModeTopdown (default) instructs the compiler to build rule
+	// and comprehension indices used by topdown evaluation.
+	EvalModeTopdown CompilerEvalMode = iota
+
+	// EvalModeIR makes the compiler skip the stages for comprehension
+	// and rule indices.
+	EvalModeIR
+)
+
+// CompilerStageDefinition defines a compiler stage.
+type CompilerStageDefinition struct {
+	Name       string
+	MetricName string
+	Stage      CompilerStage
+}
+
+// RulesOptions defines the options for retrieving rules by Ref from the
+// compiler.
+type RulesOptions struct {
+	// IncludeHiddenModules determines if the result contains hidden modules,
+	// currently only the "system" namespace, i.e. "data.system.*".
+	IncludeHiddenModules bool
+}
+
+// QueryContext contains contextual information for running an ad-hoc query.
+//
+// Ad-hoc queries can be run in the context of a package and imports may be
+// included to provide concise access to data.
+type QueryContext struct {
+	Package *Package
+	Imports []*Import
+}
+
+// NewQueryContext returns a new QueryContext object.
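+//
+// An illustrative sketch of a typical construction, using this package's
+// MustParsePackage and MustParseImports helpers:
+//
+//	qctx := NewQueryContext().
+//		WithPackage(MustParsePackage("package example")).
+//		WithImports(MustParseImports("import data.a.b"))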
+func NewQueryContext() *QueryContext { + return &QueryContext{} +} + +// WithPackage sets the pkg on qc. +func (qc *QueryContext) WithPackage(pkg *Package) *QueryContext { + if qc == nil { + qc = NewQueryContext() + } + qc.Package = pkg + return qc +} + +// WithImports sets the imports on qc. +func (qc *QueryContext) WithImports(imports []*Import) *QueryContext { + if qc == nil { + qc = NewQueryContext() + } + qc.Imports = imports + return qc +} + +// Copy returns a deep copy of qc. +func (qc *QueryContext) Copy() *QueryContext { + if qc == nil { + return nil + } + cpy := *qc + if cpy.Package != nil { + cpy.Package = qc.Package.Copy() + } + cpy.Imports = make([]*Import, len(qc.Imports)) + for i := range qc.Imports { + cpy.Imports[i] = qc.Imports[i].Copy() + } + return &cpy +} + +// QueryCompiler defines the interface for compiling ad-hoc queries. +type QueryCompiler interface { + + // Compile should be called to compile ad-hoc queries. The return value is + // the compiled version of the query. + Compile(q Body) (Body, error) + + // TypeEnv returns the type environment built after running type checking + // on the query. + TypeEnv() *TypeEnv + + // WithContext sets the QueryContext on the QueryCompiler. Subsequent calls + // to Compile will take the QueryContext into account. + WithContext(qctx *QueryContext) QueryCompiler + + // WithEnablePrintStatements enables print statements in queries compiled + // with the QueryCompiler. + WithEnablePrintStatements(yes bool) QueryCompiler + + // WithUnsafeBuiltins sets the built-in functions to treat as unsafe and not + // allow inside of queries. By default the query compiler inherits the + // compiler's unsafe built-in functions. This function allows callers to + // override that set. If an empty (non-nil) map is provided, all built-ins + // are allowed. + WithUnsafeBuiltins(unsafe map[string]struct{}) QueryCompiler + + // WithStageAfter registers a stage to run during query compilation after + // the named stage. + WithStageAfter(after string, stage QueryCompilerStageDefinition) QueryCompiler + + // RewrittenVars maps generated vars in the compiled query to vars from the + // parsed query. For example, given the query "input := 1" the rewritten + // query would be "__local0__ = 1". The mapping would then be {__local0__: input}. + RewrittenVars() map[Var]Var + + // ComprehensionIndex returns an index data structure for the given comprehension + // term. If no index is found, returns nil. + ComprehensionIndex(term *Term) *ComprehensionIndex + + // WithStrict enables strict mode for the query compiler. + WithStrict(strict bool) QueryCompiler +} + +// QueryCompilerStage defines the interface for stages in the query compiler. +type QueryCompilerStage func(QueryCompiler, Body) (Body, error) + +// QueryCompilerStageDefinition defines a QueryCompiler stage +type QueryCompilerStageDefinition struct { + Name string + MetricName string + Stage QueryCompilerStage +} + +type stage struct { + name string + metricName string + f func() +} + +// NewCompiler returns a new empty compiler. 
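+//
+// An illustrative sketch of the typical lifecycle (module parsing elided;
+// mod stands for a previously parsed *Module):
+//
+//	c := NewCompiler()
+//	c.Compile(map[string]*Module{"example.rego": mod})
+//	if c.Failed() {
+//		// inspect c.Errors
+//	}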
+func NewCompiler() *Compiler { + + c := &Compiler{ + Modules: map[string]*Module{}, + RewrittenVars: map[Var]Var{}, + Required: &Capabilities{}, + ruleIndices: util.NewHasherMap[Ref, RuleIndex](RefEqual), + maxErrs: CompileErrorLimitDefault, + after: map[string][]CompilerStageDefinition{}, + unsafeBuiltinsMap: map[string]struct{}{}, + deprecatedBuiltinsMap: map[string]struct{}{}, + comprehensionIndices: map[*Term]*ComprehensionIndex{}, + debug: debug.Discard(), + defaultRegoVersion: DefaultRegoVersion, + } + + c.ModuleTree = NewModuleTree(nil) + c.RuleTree = NewRuleTree(c.ModuleTree) + + c.stages = []stage{ + // Reference resolution should run first as it may be used to lazily + // load additional modules. If any stages run before resolution, they + // need to be re-run after resolution. + {"ResolveRefs", "compile_stage_resolve_refs", c.resolveAllRefs}, + // The local variable generator must be initialized after references are + // resolved and the dynamic module loader has run but before subsequent + // stages that need to generate variables. + {"InitLocalVarGen", "compile_stage_init_local_var_gen", c.initLocalVarGen}, + {"RewriteRuleHeadRefs", "compile_stage_rewrite_rule_head_refs", c.rewriteRuleHeadRefs}, + {"CheckKeywordOverrides", "compile_stage_check_keyword_overrides", c.checkKeywordOverrides}, + {"CheckDuplicateImports", "compile_stage_check_imports", c.checkImports}, + {"RemoveImports", "compile_stage_remove_imports", c.removeImports}, + {"SetModuleTree", "compile_stage_set_module_tree", c.setModuleTree}, + {"SetRuleTree", "compile_stage_set_rule_tree", c.setRuleTree}, // depends on RewriteRuleHeadRefs + {"RewriteLocalVars", "compile_stage_rewrite_local_vars", c.rewriteLocalVars}, + {"CheckVoidCalls", "compile_stage_check_void_calls", c.checkVoidCalls}, + {"RewritePrintCalls", "compile_stage_rewrite_print_calls", c.rewritePrintCalls}, + {"RewriteExprTerms", "compile_stage_rewrite_expr_terms", c.rewriteExprTerms}, + {"ParseMetadataBlocks", "compile_stage_parse_metadata_blocks", c.parseMetadataBlocks}, + {"SetAnnotationSet", "compile_stage_set_annotationset", c.setAnnotationSet}, + {"RewriteRegoMetadataCalls", "compile_stage_rewrite_rego_metadata_calls", c.rewriteRegoMetadataCalls}, + {"SetGraph", "compile_stage_set_graph", c.setGraph}, + {"RewriteComprehensionTerms", "compile_stage_rewrite_comprehension_terms", c.rewriteComprehensionTerms}, + {"RewriteRefsInHead", "compile_stage_rewrite_refs_in_head", c.rewriteRefsInHead}, + {"RewriteWithValues", "compile_stage_rewrite_with_values", c.rewriteWithModifiers}, + {"CheckRuleConflicts", "compile_stage_check_rule_conflicts", c.checkRuleConflicts}, + {"CheckUndefinedFuncs", "compile_stage_check_undefined_funcs", c.checkUndefinedFuncs}, + {"CheckSafetyRuleHeads", "compile_stage_check_safety_rule_heads", c.checkSafetyRuleHeads}, + {"CheckSafetyRuleBodies", "compile_stage_check_safety_rule_bodies", c.checkSafetyRuleBodies}, + {"RewriteEquals", "compile_stage_rewrite_equals", c.rewriteEquals}, + {"RewriteDynamicTerms", "compile_stage_rewrite_dynamic_terms", c.rewriteDynamicTerms}, + {"RewriteTestRulesForTracing", "compile_stage_rewrite_test_rules_for_tracing", c.rewriteTestRuleEqualities}, // must run after RewriteDynamicTerms + {"CheckRecursion", "compile_stage_check_recursion", c.checkRecursion}, + {"CheckTypes", "compile_stage_check_types", c.checkTypes}, // must be run after CheckRecursion + {"CheckUnsafeBuiltins", "compile_state_check_unsafe_builtins", c.checkUnsafeBuiltins}, + {"CheckDeprecatedBuiltins", 
"compile_state_check_deprecated_builtins", c.checkDeprecatedBuiltins}, + {"BuildRuleIndices", "compile_stage_rebuild_indices", c.buildRuleIndices}, + {"BuildComprehensionIndices", "compile_stage_rebuild_comprehension_indices", c.buildComprehensionIndices}, + {"BuildRequiredCapabilities", "compile_stage_build_required_capabilities", c.buildRequiredCapabilities}, + } + + return c +} + +// SetErrorLimit sets the number of errors the compiler can encounter before it +// quits. Zero or a negative number indicates no limit. +func (c *Compiler) SetErrorLimit(limit int) *Compiler { + c.maxErrs = limit + return c +} + +// WithEnablePrintStatements enables print statements inside of modules compiled +// by the compiler. If print statements are not enabled, calls to print() are +// erased at compile-time. +func (c *Compiler) WithEnablePrintStatements(yes bool) *Compiler { + c.enablePrintStatements = yes + return c +} + +// WithPathConflictsCheck enables base-virtual document conflict +// detection. The compiler will check that rules don't overlap with +// paths that exist as determined by the provided callable. +func (c *Compiler) WithPathConflictsCheck(fn func([]string) (bool, error)) *Compiler { + c.pathExists = fn + return c +} + +// WithPathConflictsCheckRoots enables checking path conflicts from the specified root instead +// of the top root node. Limiting conflict checks to a known set of roots, such as bundle roots, +// improves performance. Each root has the format of a "/"-delimited string, excluding the "data" +// root document. +func (c *Compiler) WithPathConflictsCheckRoots(rootPaths []string) *Compiler { + c.pathConflictCheckRoots = rootPaths + return c +} + +// WithStageAfter registers a stage to run during compilation after +// the named stage. +func (c *Compiler) WithStageAfter(after string, stage CompilerStageDefinition) *Compiler { + c.after[after] = append(c.after[after], stage) + return c +} + +// WithMetrics will set a metrics.Metrics and be used for profiling +// the Compiler instance. +func (c *Compiler) WithMetrics(metrics metrics.Metrics) *Compiler { + c.metrics = metrics + return c +} + +// WithCapabilities sets capabilities to enable during compilation. Capabilities allow the caller +// to specify the set of built-in functions available to the policy. In the future, capabilities +// may be able to restrict access to other language features. Capabilities allow callers to check +// if policies are compatible with a particular version of OPA. If policies are a compiled for a +// specific version of OPA, there is no guarantee that _this_ version of OPA can evaluate them +// successfully. +func (c *Compiler) WithCapabilities(capabilities *Capabilities) *Compiler { + c.capabilities = capabilities + return c +} + +// Capabilities returns the capabilities enabled during compilation. +func (c *Compiler) Capabilities() *Capabilities { + return c.capabilities +} + +// WithDebug sets where debug messages are written to. Passing `nil` has no +// effect. +func (c *Compiler) WithDebug(sink io.Writer) *Compiler { + if sink != nil { + c.debug = debug.New(sink) + } + return c +} + +// WithBuiltins is deprecated. +// Deprecated: Use WithCapabilities instead. +func (c *Compiler) WithBuiltins(builtins map[string]*Builtin) *Compiler { + c.customBuiltins = maps.Clone(builtins) + return c +} + +// WithUnsafeBuiltins is deprecated. +// Deprecated: Use WithCapabilities instead. 
+func (c *Compiler) WithUnsafeBuiltins(unsafeBuiltins map[string]struct{}) *Compiler {
+	maps.Copy(c.unsafeBuiltinsMap, unsafeBuiltins)
+	return c
+}
+
+// WithStrict toggles strict mode in the compiler.
+func (c *Compiler) WithStrict(strict bool) *Compiler {
+	c.strict = strict
+	return c
+}
+
+// WithKeepModules enables retaining unprocessed modules in the compiler.
+// Note that the modules aren't copied on the way in or out -- so when
+// accessing them via ParsedModules(), mutations will occur in the module
+// map that was passed into Compile().
+func (c *Compiler) WithKeepModules(y bool) *Compiler {
+	c.keepModules = y
+	return c
+}
+
+// WithUseTypeCheckAnnotations enables using schema annotations during type checking.
+func (c *Compiler) WithUseTypeCheckAnnotations(enabled bool) *Compiler {
+	c.useTypeCheckAnnotations = enabled
+	return c
+}
+
+// WithAllowUndefinedFunctionCalls disables the undefined-function check; when
+// enabled, calls to unknown functions no longer fail compilation.
+func (c *Compiler) WithAllowUndefinedFunctionCalls(allow bool) *Compiler {
+	c.allowUndefinedFuncCalls = allow
+	return c
+}
+
+// WithEvalMode allows setting the CompilerEvalMode of the compiler.
+func (c *Compiler) WithEvalMode(e CompilerEvalMode) *Compiler {
+	c.evalMode = e
+	return c
+}
+
+// WithRewriteTestRules enables rewriting test rules to capture dynamic values in local variables,
+// so they can be accessed by tracing.
+func (c *Compiler) WithRewriteTestRules(rewrite bool) *Compiler {
+	c.rewriteTestRulesForTracing = rewrite
+	return c
+}
+
+// ParsedModules returns the parsed, unprocessed modules from the compiler.
+// It is `nil` if keeping modules wasn't enabled via `WithKeepModules(true)`.
+// The map includes all modules loaded via the ModuleLoader, if one was used.
+func (c *Compiler) ParsedModules() map[string]*Module {
+	return c.parsedModules
+}
+
+// QueryCompiler returns a new QueryCompiler backed by a copy of this
+// compiler's state.
+func (c *Compiler) QueryCompiler() QueryCompiler {
+	c.init()
+	c0 := *c
+	return newQueryCompiler(&c0)
+}
+
+// Compile runs the compilation process on the input modules. The compiled
+// version of the modules and associated data structures are stored on the
+// compiler. If the compilation process fails for any reason, the compiler will
+// contain a slice of errors.
+func (c *Compiler) Compile(modules map[string]*Module) {
+
+	c.init()
+
+	c.Modules = make(map[string]*Module, len(modules))
+	c.sorted = make([]string, 0, len(modules))
+
+	if c.keepModules {
+		c.parsedModules = make(map[string]*Module, len(modules))
+	} else {
+		c.parsedModules = nil
+	}
+
+	for k, v := range modules {
+		c.Modules[k] = v.Copy()
+		c.sorted = append(c.sorted, k)
+		if c.parsedModules != nil {
+			c.parsedModules[k] = v
+		}
+	}
+
+	sort.Strings(c.sorted)
+
+	c.compile()
+}
+
+// WithSchemas sets a SchemaSet on the compiler.
+func (c *Compiler) WithSchemas(schemas *SchemaSet) *Compiler {
+	c.schemaSet = schemas
+	return c
+}
+
+// Failed returns true if a compilation error has been encountered.
+func (c *Compiler) Failed() bool {
+	return len(c.Errors) > 0
+}
+
+// ComprehensionIndex returns a data structure specifying how to index comprehension
+// results so that callers do not have to recompute the comprehension more than once.
+// If no index is found, returns nil.
+func (c *Compiler) ComprehensionIndex(term *Term) *ComprehensionIndex {
+	return c.comprehensionIndices[term]
+}
+
+// GetArity returns the number of args a function referred to by ref takes. If
+// ref refers to a built-in function, the built-in declaration is consulted;
+// otherwise, the ref is used to perform a ruleset lookup.
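+//
+// An illustrative sketch: with a function rule f(x, y) defined under
+// data.example, GetArity(MustParseRef("data.example.f")) returns 2; a ref
+// matching neither a built-in nor a function rule yields -1.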
+func (c *Compiler) GetArity(ref Ref) int { + if bi := c.builtins[ref.String()]; bi != nil { + return bi.Decl.Arity() + } + rules := c.GetRulesExact(ref) + if len(rules) == 0 { + return -1 + } + return len(rules[0].Head.Args) +} + +// GetRulesExact returns a slice of rules referred to by the reference. +// +// E.g., given the following module: +// +// package a.b.c +// +// p[k] = v { ... } # rule1 +// p[k1] = v1 { ... } # rule2 +// +// The following calls yield the rules on the right. +// +// GetRulesExact("data.a.b.c.p") => [rule1, rule2] +// GetRulesExact("data.a.b.c.p.x") => nil +// GetRulesExact("data.a.b.c") => nil +func (c *Compiler) GetRulesExact(ref Ref) (rules []*Rule) { + node := c.RuleTree + + for _, x := range ref { + if node = node.Child(x.Value); node == nil { + return nil + } + } + + return extractRules(node.Values) +} + +// GetRulesForVirtualDocument returns a slice of rules that produce the virtual +// document referred to by the reference. +// +// E.g., given the following module: +// +// package a.b.c +// +// p[k] = v { ... } # rule1 +// p[k1] = v1 { ... } # rule2 +// +// The following calls yield the rules on the right. +// +// GetRulesForVirtualDocument("data.a.b.c.p") => [rule1, rule2] +// GetRulesForVirtualDocument("data.a.b.c.p.x") => [rule1, rule2] +// GetRulesForVirtualDocument("data.a.b.c") => nil +func (c *Compiler) GetRulesForVirtualDocument(ref Ref) (rules []*Rule) { + + node := c.RuleTree + + for _, x := range ref { + if node = node.Child(x.Value); node == nil { + return nil + } + if len(node.Values) > 0 { + return extractRules(node.Values) + } + } + + return extractRules(node.Values) +} + +// GetRulesWithPrefix returns a slice of rules that share the prefix ref. +// +// E.g., given the following module: +// +// package a.b.c +// +// p[x] = y { ... } # rule1 +// p[k] = v { ... } # rule2 +// q { ... } # rule3 +// +// The following calls yield the rules on the right. +// +// GetRulesWithPrefix("data.a.b.c.p") => [rule1, rule2] +// GetRulesWithPrefix("data.a.b.c.p.a") => nil +// GetRulesWithPrefix("data.a.b.c") => [rule1, rule2, rule3] +func (c *Compiler) GetRulesWithPrefix(ref Ref) (rules []*Rule) { + + node := c.RuleTree + + for _, x := range ref { + if node = node.Child(x.Value); node == nil { + return nil + } + } + + var acc func(node *TreeNode) + + acc = func(node *TreeNode) { + rules = append(rules, extractRules(node.Values)...) + for _, child := range node.Children { + if child.Hide { + continue + } + acc(child) + } + } + + acc(node) + + return rules +} + +func extractRules(s []any) []*Rule { + rules := make([]*Rule, len(s)) + for i := range s { + rules[i] = s[i].(*Rule) + } + return rules +} + +// GetRules returns a slice of rules that are referred to by ref. +// +// E.g., given the following module: +// +// package a.b.c +// +// p[x] = y { q[x] = y; ... } # rule1 +// q[x] = y { ... } # rule2 +// +// The following calls yield the rules on the right. 
+// +// GetRules("data.a.b.c.p") => [rule1] +// GetRules("data.a.b.c.p.x") => [rule1] +// GetRules("data.a.b.c.q") => [rule2] +// GetRules("data.a.b.c") => [rule1, rule2] +// GetRules("data.a.b.d") => nil +func (c *Compiler) GetRules(ref Ref) (rules []*Rule) { + + set := map[*Rule]struct{}{} + + for _, rule := range c.GetRulesForVirtualDocument(ref) { + set[rule] = struct{}{} + } + + for _, rule := range c.GetRulesWithPrefix(ref) { + set[rule] = struct{}{} + } + + for rule := range set { + rules = append(rules, rule) + } + + return rules +} + +// GetRulesDynamic returns a slice of rules that could be referred to by a ref. +// +// Deprecated: use GetRulesDynamicWithOpts +func (c *Compiler) GetRulesDynamic(ref Ref) []*Rule { + return c.GetRulesDynamicWithOpts(ref, RulesOptions{}) +} + +// GetRulesDynamicWithOpts returns a slice of rules that could be referred to by +// a ref. +// When parts of the ref are statically known, we use that information to narrow +// down which rules the ref could refer to, but in the most general case this +// will be an over-approximation. +// +// E.g., given the following modules: +// +// package a.b.c +// +// r1 = 1 # rule1 +// +// and: +// +// package a.d.c +// +// r2 = 2 # rule2 +// +// The following calls yield the rules on the right. +// +// GetRulesDynamicWithOpts("data.a[x].c[y]", opts) => [rule1, rule2] +// GetRulesDynamicWithOpts("data.a[x].c.r2", opts) => [rule2] +// GetRulesDynamicWithOpts("data.a.b[x][y]", opts) => [rule1] +// +// Using the RulesOptions parameter, the inclusion of hidden modules can be +// controlled: +// +// With +// +// package system.main +// +// r3 = 3 # rule3 +// +// We'd get this result: +// +// GetRulesDynamicWithOpts("data[x]", RulesOptions{IncludeHiddenModules: true}) => [rule1, rule2, rule3] +// +// Without the options, it would be excluded. +func (c *Compiler) GetRulesDynamicWithOpts(ref Ref, opts RulesOptions) []*Rule { + node := c.RuleTree + + set := map[*Rule]struct{}{} + var walk func(node *TreeNode, i int) + walk = func(node *TreeNode, i int) { + switch { + case i >= len(ref): + // We've reached the end of the reference and want to collect everything + // under this "prefix". + node.DepthFirst(func(descendant *TreeNode) bool { + insertRules(set, descendant.Values) + if opts.IncludeHiddenModules { + return false + } + return descendant.Hide + }) + + case i == 0 || IsConstant(ref[i].Value): + // The head of the ref is always grounded. In case another part of the + // ref is also grounded, we can lookup the exact child. If it's not found + // we can immediately return... + if child := node.Child(ref[i].Value); child != nil { + if len(child.Values) > 0 { + // Add any rules at this position + insertRules(set, child.Values) + } + // There might still be "sub-rules" contributing key-value "overrides" for e.g. partial object rules, continue walking + walk(child, i+1) + } else { + return + } + + default: + // This part of the ref is a dynamic term. We can't know what it refers + // to and will just need to try all of the children. + for _, child := range node.Children { + if child.Hide && !opts.IncludeHiddenModules { + continue + } + insertRules(set, child.Values) + walk(child, i+1) + } + } + } + + walk(node, 0) + rules := make([]*Rule, 0, len(set)) + for rule := range set { + rules = append(rules, rule) + } + return rules +} + +// Utility: add all rule values to the set. 
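+// Values stored at rule-tree nodes are always *Rule, so the unconditional
+// type assertion below cannot fail in practice.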
+func insertRules(set map[*Rule]struct{}, rules []any) {
+	for _, rule := range rules {
+		set[rule.(*Rule)] = struct{}{}
+	}
+}
+
+// RuleIndex returns a RuleIndex built for the rule set referred to by path.
+// The path must refer to the rule set exactly, i.e., given a rule set at path
+// data.a.b.c.p, refs data.a.b.c.p.x and data.a.b.c would not return a
+// RuleIndex built for the rule set.
+func (c *Compiler) RuleIndex(path Ref) RuleIndex {
+	r, ok := c.ruleIndices.Get(path)
+	if !ok {
+		return nil
+	}
+	return r
+}
+
+// PassesTypeCheck determines whether the given body passes type checking.
+func (c *Compiler) PassesTypeCheck(body Body) bool {
+	checker := newTypeChecker().WithSchemaSet(c.schemaSet).WithInputType(c.inputType)
+	env := c.TypeEnv
+	_, errs := checker.CheckBody(env, body)
+	return len(errs) == 0
+}
+
+// PassesTypeCheckRules determines whether the given rules pass type checking.
+func (c *Compiler) PassesTypeCheckRules(rules []*Rule) Errors {
+	elems := []util.T{}
+
+	for _, rule := range rules {
+		elems = append(elems, rule)
+	}
+
+	// Load the global input schema if one was provided.
+	if c.schemaSet != nil {
+		if schema := c.schemaSet.Get(SchemaRootRef); schema != nil {
+
+			var allowNet []string
+			if c.capabilities != nil {
+				allowNet = c.capabilities.AllowNet
+			}
+
+			tpe, err := loadSchema(schema, allowNet)
+			if err != nil {
+				return Errors{NewError(TypeErr, nil, err.Error())} //nolint:govet
+			}
+			c.inputType = tpe
+		}
+	}
+
+	var as *AnnotationSet
+	if c.useTypeCheckAnnotations {
+		as = c.annotationSet
+	}
+
+	checker := newTypeChecker().WithSchemaSet(c.schemaSet).WithInputType(c.inputType)
+
+	if c.TypeEnv == nil {
+		if c.capabilities == nil {
+			c.capabilities = CapabilitiesForThisVersion()
+		}
+
+		c.builtins = make(map[string]*Builtin, len(c.capabilities.Builtins)+len(c.customBuiltins))
+
+		for _, bi := range c.capabilities.Builtins {
+			c.builtins[bi.Name] = bi
+		}
+
+		for name, bi := range c.customBuiltins {
+			c.builtins[name] = bi
+		}
+
+		c.TypeEnv = checker.Env(c.builtins)
+	}
+
+	_, errs := checker.CheckTypes(c.TypeEnv, elems, as)
+	return errs
+}
+
+// ModuleLoader defines the interface that callers can implement to enable lazy
+// loading of modules during compilation.
+type ModuleLoader func(resolved map[string]*Module) (parsed map[string]*Module, err error)
+
+// WithModuleLoader sets f as the ModuleLoader on the compiler.
+//
+// The compiler will invoke the ModuleLoader after resolving all references in
+// the current set of input modules. The ModuleLoader can return a new
+// collection of parsed modules that are to be included in the compilation
+// process. This process will repeat until the ModuleLoader returns an empty
+// collection or an error. If an error is returned, compilation will stop
+// immediately.
+func (c *Compiler) WithModuleLoader(f ModuleLoader) *Compiler {
+	c.moduleLoader = f
+	return c
+}
+
+// WithDefaultRegoVersion sets the default Rego version to use when a module doesn't specify one,
+// such as when it's hand-crafted instead of parsed.
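+//
+// An illustrative sketch: NewCompiler().WithDefaultRegoVersion(RegoV0)
+// compiles modules that don't carry an explicit version under Rego v0
+// semantics.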
+func (c *Compiler) WithDefaultRegoVersion(regoVersion RegoVersion) *Compiler { + c.defaultRegoVersion = regoVersion + return c +} + +func (c *Compiler) counterAdd(name string, n uint64) { + if c.metrics == nil { + return + } + c.metrics.Counter(name).Add(n) +} + +func (c *Compiler) buildRuleIndices() { + + c.RuleTree.DepthFirst(func(node *TreeNode) bool { + if len(node.Values) == 0 { + return false + } + rules := extractRules(node.Values) + hasNonGroundRef := false + for _, r := range rules { + hasNonGroundRef = !r.Head.Ref().IsGround() + } + if hasNonGroundRef { + // Collect children to ensure that all rules within the extent of a rule with a general ref + // are found on the same index. E.g. the following rules should be indexed under data.a.b.c: + // + // package a + // b.c[x].e := 1 { x := input.x } + // b.c.d := 2 + // b.c.d2.e[x] := 3 { x := input.x } + for _, child := range node.Children { + child.DepthFirst(func(c *TreeNode) bool { + rules = append(rules, extractRules(c.Values)...) + return false + }) + } + } + + index := newBaseDocEqIndex(func(ref Ref) bool { + return isVirtual(c.RuleTree, ref.GroundPrefix()) + }) + if index.Build(rules) { + c.ruleIndices.Put(rules[0].Ref().GroundPrefix(), index) + } + return hasNonGroundRef // currently, we don't allow those branches to go deeper + }) + +} + +func (c *Compiler) buildComprehensionIndices() { + for _, name := range c.sorted { + WalkRules(c.Modules[name], func(r *Rule) bool { + candidates := r.Head.Args.Vars() + candidates.Update(ReservedVars) + n := buildComprehensionIndices(c.debug, c.GetArity, candidates, c.RewrittenVars, r.Body, c.comprehensionIndices) + c.counterAdd(compileStageComprehensionIndexBuild, n) + return false + }) + } +} + +var ( + keywordsTerm = StringTerm("keywords") + pathTerm = StringTerm("path") + annotationsTerm = StringTerm("annotations") + futureKeywordsPrefix = Ref{FutureRootDocument, keywordsTerm} +) + +// buildRequiredCapabilities updates the required capabilities on the compiler +// to include any keyword and feature dependencies present in the modules. The +// built-in function dependencies will have already been added by the type +// checker. 
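+//
+// An illustrative sketch: a module importing future.keywords.in records the
+// "in" keyword in Required.FutureKeywords, and a general ref head such as
+// a.b[x].c records the corresponding ref-head feature in Required.Features.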
+func (c *Compiler) buildRequiredCapabilities() { + + features := map[string]struct{}{} + + // extract required keywords from modules + + keywords := map[string]struct{}{} + + for _, name := range c.sorted { + for _, imp := range c.imports[name] { + mod := c.Modules[name] + path := imp.Path.Value.(Ref) + switch { + case path.Equal(RegoV1CompatibleRef): + if !c.moduleIsRegoV1(mod) { + features[FeatureRegoV1Import] = struct{}{} + } + case path.HasPrefix(futureKeywordsPrefix): + if len(path) == 2 { + if c.moduleIsRegoV1(mod) { + for kw := range futureKeywords { + keywords[kw] = struct{}{} + } + } else { + for kw := range allFutureKeywords { + keywords[kw] = struct{}{} + } + } + } else { + kw := string(path[2].Value.(String)) + if c.moduleIsRegoV1(mod) { + for allowedKw := range futureKeywords { + if kw == allowedKw { + keywords[kw] = struct{}{} + break + } + } + } else { + for allowedKw := range allFutureKeywords { + if kw == allowedKw { + keywords[kw] = struct{}{} + break + } + } + } + } + } + } + } + + c.Required.FutureKeywords = util.KeysSorted(keywords) + + // extract required features from modules + + for _, name := range c.sorted { + mod := c.Modules[name] + + if c.moduleIsRegoV1(mod) { + features[FeatureRegoV1] = struct{}{} + } else { + for _, rule := range mod.Rules { + refLen := len(rule.Head.Reference) + if refLen >= 3 { + if refLen > len(rule.Head.Reference.ConstantPrefix()) { + features[FeatureRefHeads] = struct{}{} + } else { + features[FeatureRefHeadStringPrefixes] = struct{}{} + } + } + } + } + } + + c.Required.Features = util.KeysSorted(features) + + for i, bi := range c.Required.Builtins { + c.Required.Builtins[i] = bi.Minimal() + } +} + +// checkRecursion ensures that there are no recursive definitions, i.e., there are +// no cycles in the Graph. +func (c *Compiler) checkRecursion() { + eq := func(a, b util.T) bool { + return a.(*Rule) == b.(*Rule) + } + + c.RuleTree.DepthFirst(func(node *TreeNode) bool { + for _, rule := range node.Values { + for node := rule.(*Rule); node != nil; node = node.Else { + c.checkSelfPath(node.Loc(), eq, node, node) + } + } + return false + }) +} + +func (c *Compiler) checkSelfPath(loc *Location, eq func(a, b util.T) bool, a, b util.T) { + tr := NewGraphTraversal(c.Graph) + if p := util.DFSPath(tr, eq, a, b); len(p) > 0 { + n := make([]string, 0, len(p)) + for _, x := range p { + n = append(n, astNodeToString(x)) + } + c.err(NewError(RecursionErr, loc, "rule %v is recursive: %v", astNodeToString(a), strings.Join(n, " -> "))) + } +} + +func astNodeToString(x interface{}) string { + return x.(*Rule).Ref().String() +} + +// checkRuleConflicts ensures that rules definitions are not in conflict. +func (c *Compiler) checkRuleConflicts() { + rw := rewriteVarsInRef(c.RewrittenVars) + + c.RuleTree.DepthFirst(func(node *TreeNode) bool { + if len(node.Values) == 0 { + return false // go deeper + } + + kinds := make(map[RuleKind]struct{}, len(node.Values)) + completeRules := 0 + partialRules := 0 + arities := make(map[int]struct{}, len(node.Values)) + name := "" + var conflicts []Ref + defaultRules := make([]*Rule, 0) + + for _, rule := range node.Values { + r := rule.(*Rule) + ref := r.Ref() + name = rw(ref.CopyNonGround()).String() // varRewriter operates in-place + kinds[r.Head.RuleKind()] = struct{}{} + arities[len(r.Head.Args)] = struct{}{} + if r.Default { + defaultRules = append(defaultRules, r) + } + + // Single-value rules may not have any other rules in their extent. + // Rules with vars in their ref are allowed to have rules inside their extent. 
+ // Only the ground portion (terms before the first var term) of a rule's ref is considered when determining + // whether it's inside the extent of another (c.RuleTree is organized this way already). + // These pairs are invalid: + // + // data.p.q.r { true } # data.p.q is { "r": true } + // data.p.q.r.s { true } + // + // data.p.q.r { true } + // data.p.q.r[s].t { s = input.key } + // + // But this is allowed: + // + // data.p.q.r { true } + // data.p.q[r].s.t { r = input.key } + // + // data.p[r] := x { r = input.key; x = input.bar } + // data.p.q[r] := x { r = input.key; x = input.bar } + // + // data.p.q[r] { r := input.r } + // data.p.q.r.s { true } + // + // data.p.q[r] = 1 { r := "r" } + // data.p.q.s = 2 + // + // data.p[q][r] { q := input.q; r := input.r } + // data.p.q.r { true } + // + // data.p.q[r] { r := input.r } + // data.p[q].r { q := input.q } + // + // data.p.q[r][s] { r := input.r; s := input.s } + // data.p[q].r.s { q := input.q } + + if ref.IsGround() && len(node.Children) > 0 { + conflicts = node.flattenChildren() + } + + if r.Head.RuleKind() == SingleValue && r.Head.Ref().IsGround() { + completeRules++ + } else { + partialRules++ + } + } + + switch { + case conflicts != nil: + c.err(NewError(TypeErr, node.Values[0].(*Rule).Loc(), "rule %v conflicts with %v", name, conflicts)) + + case len(kinds) > 1 || len(arities) > 1 || (completeRules >= 1 && partialRules >= 1): + c.err(NewError(TypeErr, node.Values[0].(*Rule).Loc(), "conflicting rules %v found", name)) + + case len(defaultRules) > 1: + + defaultRuleLocations := strings.Builder{} + defaultRuleLocations.WriteString(defaultRules[0].Loc().String()) + for i := 1; i < len(defaultRules); i++ { + defaultRuleLocations.WriteString(", ") + defaultRuleLocations.WriteString(defaultRules[i].Loc().String()) + } + + c.err(NewError( + TypeErr, + defaultRules[0].Module.Package.Loc(), + "multiple default rules %s found at %s", + name, defaultRuleLocations.String()), + ) + } + + return false + }) + + if c.pathExists != nil { + for _, err := range CheckPathConflicts(c, c.pathExists) { + c.err(err) + } + } + + // NOTE(sr): depthfirst might better use sorted for stable errs? + c.ModuleTree.DepthFirst(func(node *ModuleTreeNode) bool { + for _, mod := range node.Modules { + for _, rule := range mod.Rules { + ref := rule.Head.Ref().GroundPrefix() + // Rules with a dynamic portion in their ref are exempted, as a conflict within the dynamic portion + // can only be detected at eval-time. + if len(ref) < len(rule.Head.Ref()) { + continue + } + + childNode, tail := node.find(ref) + if childNode != nil && len(tail) == 0 { + for _, childMod := range childNode.Modules { + // Avoid recursively checking a module for equality unless we know it's a possible self-match. 
+ if childMod.Equal(mod) { + continue // don't self-conflict + } + msg := fmt.Sprintf("%v conflicts with rule %v defined at %v", childMod.Package, rule.Head.Ref(), rule.Loc()) + c.err(NewError(TypeErr, mod.Package.Loc(), msg)) //nolint:govet + } + } + } + } + return false + }) +} + +func (c *Compiler) checkUndefinedFuncs() { + for _, name := range c.sorted { + m := c.Modules[name] + for _, err := range checkUndefinedFuncs(c.TypeEnv, m, c.GetArity, c.RewrittenVars) { + c.err(err) + } + } +} + +func checkUndefinedFuncs(env *TypeEnv, x interface{}, arity func(Ref) int, rwVars map[Var]Var) Errors { + + var errs Errors + + WalkExprs(x, func(expr *Expr) bool { + if !expr.IsCall() { + return false + } + ref := expr.Operator() + if arity := arity(ref); arity >= 0 { + operands := len(expr.Operands()) + if expr.Generated { // an output var was added + if !expr.IsEquality() && operands != arity+1 { + ref = rewriteVarsInRef(rwVars)(ref) + errs = append(errs, arityMismatchError(env, ref, expr, arity, operands-1)) + return true + } + } else { // either output var or not + if operands != arity && operands != arity+1 { + ref = rewriteVarsInRef(rwVars)(ref) + errs = append(errs, arityMismatchError(env, ref, expr, arity, operands)) + return true + } + } + return false + } + ref = rewriteVarsInRef(rwVars)(ref) + errs = append(errs, NewError(TypeErr, expr.Loc(), "undefined function %v", ref)) + return true + }) + + return errs +} + +func arityMismatchError(env *TypeEnv, f Ref, expr *Expr, exp, act int) *Error { + if want, ok := env.Get(f).(*types.Function); ok { // generate richer error for built-in functions + have := make([]types.Type, len(expr.Operands())) + for i, op := range expr.Operands() { + have[i] = env.Get(op) + } + return newArgError(expr.Loc(), f, "arity mismatch", have, want.NamedFuncArgs()) + } + if act != 1 { + return NewError(TypeErr, expr.Loc(), "function %v has arity %d, got %d arguments", f, exp, act) + } + return NewError(TypeErr, expr.Loc(), "function %v has arity %d, got %d argument", f, exp, act) +} + +// checkSafetyRuleBodies ensures that variables appearing in negated expressions or non-target +// positions of built-in expressions will be bound when evaluating the rule from left +// to right, re-ordering as necessary. +func (c *Compiler) checkSafetyRuleBodies() { + for _, name := range c.sorted { + m := c.Modules[name] + WalkRules(m, func(r *Rule) bool { + safe := ReservedVars.Copy() + safe.Update(r.Head.Args.Vars()) + r.Body = c.checkBodySafety(safe, r.Body) + return false + }) + } +} + +func (c *Compiler) checkBodySafety(safe VarSet, b Body) Body { + reordered, unsafe := reorderBodyForSafety(c.builtins, c.GetArity, safe, b) + if errs := safetyErrorSlice(unsafe, c.RewrittenVars); len(errs) > 0 { + for _, err := range errs { + c.err(err) + } + return b + } + return reordered +} + +// SafetyCheckVisitorParams defines the AST visitor parameters to use for collecting +// variables during the safety check. This has to be exported because it's relied on +// by the copy propagation implementation in topdown. +var SafetyCheckVisitorParams = VarVisitorParams{ + SkipRefCallHead: true, + SkipClosures: true, +} + +// checkSafetyRuleHeads ensures that variables appearing in the head of a +// rule also appear in the body. 
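+//
+// An illustrative sketch: p[x] { true } is rejected because x never appears
+// in the body, whereas p[x] { x := 1 } is safe.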
+func (c *Compiler) checkSafetyRuleHeads() { + + for _, name := range c.sorted { + m := c.Modules[name] + WalkRules(m, func(r *Rule) bool { + safe := r.Body.Vars(SafetyCheckVisitorParams) + safe.Update(r.Head.Args.Vars()) + unsafe := r.Head.Vars().Diff(safe) + for v := range unsafe { + if w, ok := c.RewrittenVars[v]; ok { + v = w + } + if !v.IsGenerated() { + c.err(NewError(UnsafeVarErr, r.Loc(), "var %v is unsafe", v)) + } + } + return false + }) + } +} + +func compileSchema(goSchema interface{}, allowNet []string) (*gojsonschema.Schema, error) { + gojsonschema.SetAllowNet(allowNet) + + var refLoader gojsonschema.JSONLoader + sl := gojsonschema.NewSchemaLoader() + + if goSchema != nil { + refLoader = gojsonschema.NewGoLoader(goSchema) + } else { + return nil, errors.New("no schema as input to compile") + } + schemasCompiled, err := sl.Compile(refLoader) + if err != nil { + return nil, fmt.Errorf("unable to compile the schema: %w", err) + } + return schemasCompiled, nil +} + +func mergeSchemas(schemas ...*gojsonschema.SubSchema) (*gojsonschema.SubSchema, error) { + if len(schemas) == 0 { + return nil, nil + } + var result = schemas[0] + + for i := range schemas { + if len(schemas[i].PropertiesChildren) > 0 { + if !schemas[i].Types.Contains("object") { + if err := schemas[i].Types.Add("object"); err != nil { + return nil, errors.New("unable to set the type in schemas") + } + } + } else if len(schemas[i].ItemsChildren) > 0 { + if !schemas[i].Types.Contains("array") { + if err := schemas[i].Types.Add("array"); err != nil { + return nil, errors.New("unable to set the type in schemas") + } + } + } + } + + for i := 1; i < len(schemas); i++ { + if result.Types.String() != schemas[i].Types.String() { + return nil, fmt.Errorf("unable to merge these schemas: type mismatch: %v and %v", result.Types.String(), schemas[i].Types.String()) + } else if result.Types.Contains("object") && len(result.PropertiesChildren) > 0 && schemas[i].Types.Contains("object") && len(schemas[i].PropertiesChildren) > 0 { + result.PropertiesChildren = append(result.PropertiesChildren, schemas[i].PropertiesChildren...) 
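+			// (Object schemas merge by concatenating their static property
+			// lists; the array branch below instead aligns item schemas
+			// positionally and requires matching types.)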
+		} else if result.Types.Contains("array") && len(result.ItemsChildren) > 0 && schemas[i].Types.Contains("array") && len(schemas[i].ItemsChildren) > 0 {
+			for j := range len(schemas[i].ItemsChildren) {
+				if len(result.ItemsChildren)-1 < j && !(len(schemas[i].ItemsChildren)-1 < j) {
+					result.ItemsChildren = append(result.ItemsChildren, schemas[i].ItemsChildren[j])
+				}
+				if result.ItemsChildren[j].Types.String() != schemas[i].ItemsChildren[j].Types.String() {
+					return nil, errors.New("unable to merge these schemas")
+				}
+			}
+		}
+	}
+	return result, nil
+}
+
+type schemaParser struct {
+	definitionCache map[string]*cachedDef
+}
+
+type cachedDef struct {
+	properties []*types.StaticProperty
+}
+
+func newSchemaParser() *schemaParser {
+	return &schemaParser{
+		definitionCache: map[string]*cachedDef{},
+	}
+}
+
+func (parser *schemaParser) parseSchema(schema interface{}) (types.Type, error) {
+	return parser.parseSchemaWithPropertyKey(schema, "")
+}
+
+func (parser *schemaParser) parseSchemaWithPropertyKey(schema interface{}, propertyKey string) (types.Type, error) {
+	subSchema, ok := schema.(*gojsonschema.SubSchema)
+	if !ok {
+		return nil, fmt.Errorf("unexpected schema type %v", subSchema)
+	}
+
+	// Handle referenced schemas; returns directly when a $ref is found
+	if subSchema.RefSchema != nil {
+		if existing, ok := parser.definitionCache[subSchema.Ref.String()]; ok {
+			return types.NewObject(existing.properties, nil), nil
+		}
+		return parser.parseSchemaWithPropertyKey(subSchema.RefSchema, subSchema.Ref.String())
+	}
+
+	// Handle anyOf
+	if subSchema.AnyOf != nil {
+		var orType types.Type
+
+		// If there is a core schema, find its type first
+		if subSchema.Types.IsTyped() {
+			copySchema := *subSchema
+			copySchemaRef := &copySchema
+			copySchemaRef.AnyOf = nil
+			coreType, err := parser.parseSchema(copySchemaRef)
+			if err != nil {
+				return nil, fmt.Errorf("unexpected schema type %v: %w", subSchema, err)
+			}
+
+			// Only add Object type with static props to orType
+			if objType, ok := coreType.(*types.Object); ok {
+				if objType.StaticProperties() != nil && objType.DynamicProperties() == nil {
+					orType = types.Or(orType, coreType)
+				}
+			}
+		}
+
+		// Iterate through every property of AnyOf and add it to orType
+		for _, pSchema := range subSchema.AnyOf {
+			newtype, err := parser.parseSchema(pSchema)
+			if err != nil {
+				return nil, fmt.Errorf("unexpected schema type %v: %w", pSchema, err)
+			}
+			orType = types.Or(newtype, orType)
+		}
+
+		return orType, nil
+	}
+
+	if subSchema.AllOf != nil {
+		subSchemaArray := subSchema.AllOf
+		allOfResult, err := mergeSchemas(subSchemaArray...)
+ if err != nil { + return nil, err + } + + if subSchema.Types.IsTyped() { + if (subSchema.Types.Contains("object") && allOfResult.Types.Contains("object")) || (subSchema.Types.Contains("array") && allOfResult.Types.Contains("array")) { + objectOrArrayResult, err := mergeSchemas(allOfResult, subSchema) + if err != nil { + return nil, err + } + return parser.parseSchema(objectOrArrayResult) + } else if subSchema.Types.String() != allOfResult.Types.String() { + return nil, errors.New("unable to merge these schemas") + } + } + return parser.parseSchema(allOfResult) + } + + if subSchema.Types.IsTyped() { + if subSchema.Types.Contains("boolean") { + return types.B, nil + + } else if subSchema.Types.Contains("string") { + return types.S, nil + + } else if subSchema.Types.Contains("integer") || subSchema.Types.Contains("number") { + return types.N, nil + + } else if subSchema.Types.Contains("object") { + if len(subSchema.PropertiesChildren) > 0 { + def := &cachedDef{ + properties: make([]*types.StaticProperty, 0, len(subSchema.PropertiesChildren)), + } + for _, pSchema := range subSchema.PropertiesChildren { + def.properties = append(def.properties, types.NewStaticProperty(pSchema.Property, nil)) + } + if propertyKey != "" { + parser.definitionCache[propertyKey] = def + } + for _, pSchema := range subSchema.PropertiesChildren { + newtype, err := parser.parseSchema(pSchema) + if err != nil { + return nil, fmt.Errorf("unexpected schema type %v: %w", pSchema, err) + } + for i, prop := range def.properties { + if prop.Key == pSchema.Property { + def.properties[i].Value = newtype + break + } + } + } + return types.NewObject(def.properties, nil), nil + } + return types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), nil + + } else if subSchema.Types.Contains("array") { + if len(subSchema.ItemsChildren) > 0 { + if subSchema.ItemsChildrenIsSingleSchema { + iSchema := subSchema.ItemsChildren[0] + newtype, err := parser.parseSchema(iSchema) + if err != nil { + return nil, fmt.Errorf("unexpected schema type %v", iSchema) + } + return types.NewArray(nil, newtype), nil + } + newTypes := make([]types.Type, 0, len(subSchema.ItemsChildren)) + for i := 0; i != len(subSchema.ItemsChildren); i++ { + iSchema := subSchema.ItemsChildren[i] + newtype, err := parser.parseSchema(iSchema) + if err != nil { + return nil, fmt.Errorf("unexpected schema type %v", iSchema) + } + newTypes = append(newTypes, newtype) + } + return types.NewArray(newTypes, nil), nil + } + return types.NewArray(nil, types.A), nil + } + } + + // Assume types if not specified in schema + if len(subSchema.PropertiesChildren) > 0 { + if err := subSchema.Types.Add("object"); err == nil { + return parser.parseSchema(subSchema) + } + } else if len(subSchema.ItemsChildren) > 0 { + if err := subSchema.Types.Add("array"); err == nil { + return parser.parseSchema(subSchema) + } + } + + return types.A, nil +} + +func (c *Compiler) setAnnotationSet() { + // Sorting modules by name for stable error reporting + sorted := make([]*Module, 0, len(c.Modules)) + for _, mName := range c.sorted { + sorted = append(sorted, c.Modules[mName]) + } + + as, errs := BuildAnnotationSet(sorted) + for _, err := range errs { + c.err(err) + } + c.annotationSet = as +} + +// checkTypes runs the type checker on all rules. The type checker builds a +// TypeEnv that is stored on the compiler. +func (c *Compiler) checkTypes() { + // Recursion is caught in earlier step, so this cannot fail. + sorted, _ := c.Graph.Sort() + checker := newTypeChecker(). 
+ WithAllowNet(c.capabilities.AllowNet). + WithSchemaSet(c.schemaSet). + WithInputType(c.inputType). + WithBuiltins(c.builtins). + WithRequiredCapabilities(c.Required). + WithVarRewriter(rewriteVarsInRef(c.RewrittenVars)). + WithAllowUndefinedFunctionCalls(c.allowUndefinedFuncCalls) + var as *AnnotationSet + if c.useTypeCheckAnnotations { + as = c.annotationSet + } + env, errs := checker.CheckTypes(c.TypeEnv, sorted, as) + for _, err := range errs { + c.err(err) + } + c.TypeEnv = env +} + +func (c *Compiler) checkUnsafeBuiltins() { + if len(c.unsafeBuiltinsMap) == 0 { + return + } + + for _, name := range c.sorted { + errs := checkUnsafeBuiltins(c.unsafeBuiltinsMap, c.Modules[name]) + for _, err := range errs { + c.err(err) + } + } +} + +func (c *Compiler) checkDeprecatedBuiltins() { + checkNeeded := false + for _, b := range c.Required.Builtins { + if _, found := c.deprecatedBuiltinsMap[b.Name]; found { + checkNeeded = true + break + } + } + if !checkNeeded { + return + } + + for _, name := range c.sorted { + mod := c.Modules[name] + if c.strict || mod.regoV1Compatible() { + errs := checkDeprecatedBuiltins(c.deprecatedBuiltinsMap, mod) + for _, err := range errs { + c.err(err) + } + } + } +} + +func (c *Compiler) runStage(metricName string, f func()) { + if c.metrics != nil { + c.metrics.Timer(metricName).Start() + defer c.metrics.Timer(metricName).Stop() + } + f() +} + +func (c *Compiler) runStageAfter(metricName string, s CompilerStage) *Error { + if c.metrics != nil { + c.metrics.Timer(metricName).Start() + defer c.metrics.Timer(metricName).Stop() + } + return s(c) +} + +func (c *Compiler) compile() { + + defer func() { + if r := recover(); r != nil && r != errLimitReached { + panic(r) + } + }() + + for _, s := range c.stages { + if c.evalMode == EvalModeIR { + switch s.name { + case "BuildRuleIndices", "BuildComprehensionIndices": + continue // skip these stages + } + } + + if c.allowUndefinedFuncCalls && (s.name == "CheckUndefinedFuncs" || s.name == "CheckSafetyRuleBodies") { + continue + } + + c.runStage(s.metricName, s.f) + if c.Failed() { + return + } + for _, a := range c.after[s.name] { + if err := c.runStageAfter(a.MetricName, a.Stage); err != nil { + c.err(err) + return + } + } + } +} + +func (c *Compiler) init() { + + if c.initialized { + return + } + + if c.capabilities == nil { + c.capabilities = CapabilitiesForThisVersion() + } + + c.builtins = make(map[string]*Builtin, len(c.capabilities.Builtins)+len(c.customBuiltins)) + + for _, bi := range c.capabilities.Builtins { + c.builtins[bi.Name] = bi + if bi.IsDeprecated() { + c.deprecatedBuiltinsMap[bi.Name] = struct{}{} + } + } + + for name, bi := range c.customBuiltins { + c.builtins[name] = bi + } + + // Load the global input schema if one was provided. + if c.schemaSet != nil { + if schema := c.schemaSet.Get(SchemaRootRef); schema != nil { + tpe, err := loadSchema(schema, c.capabilities.AllowNet) + if err != nil { + c.err(NewError(TypeErr, nil, err.Error())) //nolint:govet + } else { + c.inputType = tpe + } + } + } + + c.TypeEnv = newTypeChecker(). + WithSchemaSet(c.schemaSet). + WithInputType(c.inputType). 
+		Env(c.builtins)
+
+	c.initialized = true
+}
+
+func (c *Compiler) err(err *Error) {
+	if c.maxErrs > 0 && len(c.Errors) >= c.maxErrs {
+		c.Errors = append(c.Errors, errLimitReached)
+		panic(errLimitReached)
+	}
+	c.Errors = append(c.Errors, err)
+}
+
+func (c *Compiler) getExports() *util.HasherMap[Ref, []Ref] {
+
+	rules := util.NewHasherMap[Ref, []Ref](RefEqual)
+
+	for _, name := range c.sorted {
+		mod := c.Modules[name]
+
+		for _, rule := range mod.Rules {
+			hashMapAdd(rules, mod.Package.Path, rule.Head.Ref().GroundPrefix())
+		}
+	}
+
+	return rules
+}
+
+func refSliceEqual(a, b []Ref) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i := range a {
+		if !a[i].Equal(b[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+func hashMapAdd(rules *util.HasherMap[Ref, []Ref], pkg, rule Ref) {
+	prev, ok := rules.Get(pkg)
+	if !ok {
+		rules.Put(pkg, []Ref{rule})
+		return
+	}
+	for _, p := range prev {
+		if p.Equal(rule) {
+			return
+		}
+	}
+	rules.Put(pkg, append(prev, rule))
+}
+
+func (c *Compiler) GetAnnotationSet() *AnnotationSet {
+	return c.annotationSet
+}
+
+func (c *Compiler) checkImports() {
+	modules := make([]*Module, 0, len(c.Modules))
+
+	supportsRegoV1Import := c.capabilities.ContainsFeature(FeatureRegoV1Import) ||
+		c.capabilities.ContainsFeature(FeatureRegoV1)
+
+	for _, name := range c.sorted {
+		mod := c.Modules[name]
+
+		for _, imp := range mod.Imports {
+			if !supportsRegoV1Import && RegoV1CompatibleRef.Equal(imp.Path.Value) {
+				c.err(NewError(CompileErr, imp.Loc(), "rego.v1 import is not supported"))
+			}
+		}
+
+		if c.strict || c.moduleIsRegoV1Compatible(mod) {
+			modules = append(modules, mod)
+		}
+	}
+
+	errs := checkDuplicateImports(modules)
+	for _, err := range errs {
+		c.err(err)
+	}
+}
+
+func (c *Compiler) checkKeywordOverrides() {
+	for _, name := range c.sorted {
+		mod := c.Modules[name]
+		if c.strict || c.moduleIsRegoV1Compatible(mod) {
+			errs := checkRootDocumentOverrides(mod)
+			for _, err := range errs {
+				c.err(err)
+			}
+		}
+	}
+}
+
+func (c *Compiler) moduleIsRegoV1(mod *Module) bool {
+	if mod.regoVersion == RegoUndefined {
+		switch c.defaultRegoVersion {
+		case RegoUndefined:
+			c.err(NewError(CompileErr, mod.Package.Loc(), "cannot determine rego version for module"))
+			return false
+		case RegoV1:
+			return true
+		}
+		return false
+	}
+	return mod.regoVersion == RegoV1
+}
+
+func (c *Compiler) moduleIsRegoV1Compatible(mod *Module) bool {
+	if mod.regoVersion == RegoUndefined {
+		switch c.defaultRegoVersion {
+		case RegoUndefined:
+			c.err(NewError(CompileErr, mod.Package.Loc(), "cannot determine rego version for module"))
+			return false
+		case RegoV1, RegoV0CompatV1:
+			return true
+		}
+		return false
+	}
+	return mod.regoV1Compatible()
+}
+
+// resolveAllRefs resolves references in expressions to their fully qualified values.
+//
+// For instance, given the following module:
+//
+//	package a.b
+//	import data.foo.bar
+//	p[x] { bar[_] = x }
+//
+// The reference "bar[_]" would be resolved to "data.foo.bar[_]".
+//
+// Ref rules are resolved, too:
+//
+//	package a.b
+//	q { c.d.e == 1 }
+//	c.d[e] := 1 if e := "e"
+//
+// The reference "c.d.e" would be resolved to "data.a.b.c.d.e".
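+//
+// Aliased imports resolve through the alias in the same way; illustratively:
+//
+//	package a.b
+//	import data.foo.bar as baz
+//	p[x] { baz[_] = x }
+//
+// Here "baz[_]" would likewise be resolved to "data.foo.bar[_]".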
+func (c *Compiler) resolveAllRefs() {
+
+	rules := c.getExports()
+
+	for _, name := range c.sorted {
+		mod := c.Modules[name]
+
+		var ruleExports []Ref
+		if x, ok := rules.Get(mod.Package.Path); ok {
+			ruleExports = x
+		}
+
+		globals := getGlobals(mod.Package, ruleExports, mod.Imports)
+
+		WalkRules(mod, func(rule *Rule) bool {
+			err := resolveRefsInRule(globals, rule)
+			if err != nil {
+				c.err(NewError(CompileErr, rule.Location, err.Error())) //nolint:govet
+			}
+			return false
+		})
+
+		if c.strict { // check for unused imports
+			for _, imp := range mod.Imports {
+				path := imp.Path.Value.(Ref)
+				if FutureRootDocument.Equal(path[0]) || RegoRootDocument.Equal(path[0]) {
+					continue // ignore future and rego imports
+				}
+
+				for v, u := range globals {
+					if v.Equal(imp.Name()) && !u.used {
+						c.err(NewError(CompileErr, imp.Location, "%s unused", imp.String()))
+					}
+				}
+			}
+		}
+	}
+
+	if c.moduleLoader != nil {
+
+		parsed, err := c.moduleLoader(c.Modules)
+		if err != nil {
+			c.err(NewError(CompileErr, nil, err.Error())) //nolint:govet
+			return
+		}
+
+		if len(parsed) == 0 {
+			return
+		}
+
+		for id, module := range parsed {
+			c.Modules[id] = module.Copy()
+			c.sorted = append(c.sorted, id)
+			if c.parsedModules != nil {
+				c.parsedModules[id] = module
+			}
+		}
+
+		sort.Strings(c.sorted)
+		c.resolveAllRefs()
+	}
+}
+
+func (c *Compiler) removeImports() {
+	c.imports = make(map[string][]*Import, len(c.Modules))
+	for name := range c.Modules {
+		c.imports[name] = c.Modules[name].Imports
+		c.Modules[name].Imports = nil
+	}
+}
+
+func (c *Compiler) initLocalVarGen() {
+	c.localvargen = newLocalVarGeneratorForModuleSet(c.sorted, c.Modules)
+}
+
+func (c *Compiler) rewriteComprehensionTerms() {
+	f := newEqualityFactory(c.localvargen)
+	for _, name := range c.sorted {
+		mod := c.Modules[name]
+		_, _ = rewriteComprehensionTerms(f, mod) // ignore error
+	}
+}
+
+func (c *Compiler) rewriteExprTerms() {
+	for _, name := range c.sorted {
+		mod := c.Modules[name]
+		WalkRules(mod, func(rule *Rule) bool {
+			rewriteExprTermsInHead(c.localvargen, rule)
+			rule.Body = rewriteExprTermsInBody(c.localvargen, rule.Body)
+			return false
+		})
+	}
+}
+
+func (c *Compiler) rewriteRuleHeadRefs() {
+	f := newEqualityFactory(c.localvargen)
+	for _, name := range c.sorted {
+		WalkRules(c.Modules[name], func(rule *Rule) bool {
+
+			ref := rule.Head.Ref()
+			// NOTE(sr): We're backfilling Refs here -- all parser code paths would have them, but
+			// it's possible to construct Module{} instances from Golang code, so we need
+			// to accommodate that, too.
+ if len(rule.Head.Reference) == 0 { + rule.Head.Reference = ref + } + + cannotSpeakStringPrefixRefs := true + cannotSpeakGeneralRefs := true + for _, f := range c.capabilities.Features { + switch f { + case FeatureRefHeadStringPrefixes: + cannotSpeakStringPrefixRefs = false + case FeatureRefHeads: + cannotSpeakGeneralRefs = false + case FeatureRegoV1: + cannotSpeakStringPrefixRefs = false + cannotSpeakGeneralRefs = false + } + } + + if cannotSpeakStringPrefixRefs && cannotSpeakGeneralRefs && rule.Head.Name == "" { + c.err(NewError(CompileErr, rule.Loc(), "rule heads with refs are not supported: %v", rule.Head.Reference)) + return true + } + + for i := 1; i < len(ref); i++ { + if cannotSpeakGeneralRefs && (rule.Head.RuleKind() == MultiValue || i != len(ref)-1) { // last + if _, ok := ref[i].Value.(String); !ok { + c.err(NewError(TypeErr, rule.Loc(), "rule heads with general refs (containing variables) are not supported: %v", rule.Head.Reference)) + continue + } + } + + // Rewrite so that any non-scalar elements in the rule's ref are vars: + // p.q.r[y.z] { ... } => p.q.r[__local0__] { __local0__ = y.z } + // p.q[a.b][c.d] { ... } => p.q[__local0__] { __local0__ = a.b; __local1__ = c.d } + // because that's what the RuleTree knows how to deal with. + if _, ok := ref[i].Value.(Var); !ok && !IsScalar(ref[i].Value) { + expr := f.Generate(ref[i]) + if i == len(ref)-1 && rule.Head.Key.Equal(ref[i]) { + rule.Head.Key = expr.Operand(0) + } + rule.Head.Reference[i] = expr.Operand(0) + rule.Body.Append(expr) + } + } + + return true + }) + } +} + +func (c *Compiler) checkVoidCalls() { + for _, name := range c.sorted { + mod := c.Modules[name] + for _, err := range checkVoidCalls(c.TypeEnv, mod) { + c.err(err) + } + } +} + +func (c *Compiler) rewritePrintCalls() { + var modified bool + if !c.enablePrintStatements { + for _, name := range c.sorted { + if erasePrintCalls(c.Modules[name]) { + modified = true + } + } + } else { + for _, name := range c.sorted { + mod := c.Modules[name] + WalkRules(mod, func(r *Rule) bool { + safe := r.Head.Args.Vars() + safe.Update(ReservedVars) + vis := func(b Body) bool { + modrec, errs := rewritePrintCalls(c.localvargen, c.GetArity, safe, b) + if modrec { + modified = true + } + for _, err := range errs { + c.err(err) + } + return false + } + WalkBodies(r.Head, vis) + WalkBodies(r.Body, vis) + return false + }) + } + } + if modified { + c.Required.addBuiltinSorted(Print) + } +} + +// checkVoidCalls returns errors for any expressions that treat void function +// calls as values. The only void functions in Rego are specific built-ins like +// print(). +func checkVoidCalls(env *TypeEnv, x interface{}) Errors { + var errs Errors + WalkTerms(x, func(x *Term) bool { + if call, ok := x.Value.(Call); ok { + if tpe, ok := env.Get(call[0]).(*types.Function); ok && tpe.Result() == nil { + errs = append(errs, NewError(TypeErr, x.Loc(), "%v used as value", call)) + } + } + return false + }) + return errs +} + +// rewritePrintCalls will rewrite the body so that print operands are captured +// in local variables and their evaluation occurs within a comprehension. +// Wrapping the terms inside of a comprehension ensures that undefined values do +// not short-circuit evaluation. 
+// +// For example, given the following print statement: +// +// print("the value of x is:", input.x) +// +// The expression would be rewritten to: +// +// print({__local0__ | __local0__ = "the value of x is:"}, {__local1__ | __local1__ = input.x}) +func rewritePrintCalls(gen *localVarGenerator, getArity func(Ref) int, globals VarSet, body Body) (bool, Errors) { + + var errs Errors + var modified bool + + // Visit comprehension bodies recursively to ensure print statements inside + // those bodies only close over variables that are safe. + for i := range body { + if ContainsClosures(body[i]) { + safe := outputVarsForBody(body[:i], getArity, globals) + safe.Update(globals) + WalkClosures(body[i], func(x interface{}) bool { + var modrec bool + var errsrec Errors + switch x := x.(type) { + case *SetComprehension: + modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body) + case *ArrayComprehension: + modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body) + case *ObjectComprehension: + modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body) + case *Every: + safe.Update(x.KeyValueVars()) + modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body) + } + if modrec { + modified = true + } + errs = append(errs, errsrec...) + return true + }) + if len(errs) > 0 { + return false, errs + } + } + } + + for i := range body { + + if !isPrintCall(body[i]) { + continue + } + + modified = true + + var errs Errors + safe := outputVarsForBody(body[:i], getArity, globals) + safe.Update(globals) + args := body[i].Operands() + + for j := range args { + vis := NewVarVisitor().WithParams(SafetyCheckVisitorParams) + vis.Walk(args[j]) + unsafe := vis.Vars().Diff(safe) + for _, v := range unsafe.Sorted() { + errs = append(errs, NewError(CompileErr, args[j].Loc(), "var %v is undeclared", v)) + } + } + + if len(errs) > 0 { + return false, errs + } + + arr := NewArray() + + for j := range args { + x := NewTerm(gen.Generate()).SetLocation(args[j].Loc()) + capture := Equality.Expr(x, args[j]).SetLocation(args[j].Loc()) + arr = arr.Append(SetComprehensionTerm(x, NewBody(capture)).SetLocation(args[j].Loc())) + } + + body.Set(NewExpr([]*Term{ + NewTerm(InternalPrint.Ref()).SetLocation(body[i].Loc()), + NewTerm(arr).SetLocation(body[i].Loc()), + }).SetLocation(body[i].Loc()), i) + } + + return modified, nil +} + +func erasePrintCalls(node interface{}) bool { + var modified bool + NewGenericVisitor(func(x interface{}) bool { + var modrec bool + switch x := x.(type) { + case *Rule: + modrec, x.Body = erasePrintCallsInBody(x.Body) + case *ArrayComprehension: + modrec, x.Body = erasePrintCallsInBody(x.Body) + case *SetComprehension: + modrec, x.Body = erasePrintCallsInBody(x.Body) + case *ObjectComprehension: + modrec, x.Body = erasePrintCallsInBody(x.Body) + case *Every: + modrec, x.Body = erasePrintCallsInBody(x.Body) + } + if modrec { + modified = true + } + return false + }).Walk(node) + return modified +} + +func erasePrintCallsInBody(x Body) (bool, Body) { + + if !containsPrintCall(x) { + return false, x + } + + var cpy Body + + for i := range x { + + // Recursively visit any comprehensions contained in this expression. 
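+		// Nested print calls are erased in place; the enclosing expression
+		// itself is only dropped below when it is itself a print call.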
+ erasePrintCalls(x[i]) + + if !isPrintCall(x[i]) { + cpy.Append(x[i]) + } + } + + if len(cpy) == 0 { + term := BooleanTerm(true).SetLocation(x.Loc()) + expr := NewExpr(term).SetLocation(x.Loc()) + cpy.Append(expr) + } + + return true, cpy +} + +func containsPrintCall(x interface{}) bool { + var found bool + WalkExprs(x, func(expr *Expr) bool { + if !found { + if isPrintCall(expr) { + found = true + } + } + return found + }) + return found +} + +var printRef = Print.Ref() + +func isPrintCall(x *Expr) bool { + return x.IsCall() && x.Operator().Equal(printRef) +} + +// rewriteRefsInHead will rewrite rules so that the head does not contain any +// terms that require evaluation (e.g., refs or comprehensions). If the key or +// value contains one or more of these terms, the key or value will be moved +// into the body and assigned to a new variable. The new variable will replace +// the key or value in the head. +// +// For instance, given the following rule: +// +// p[{"foo": data.foo[i]}] { i < 100 } +// +// The rule would be re-written as: +// +// p[__local0__] { i < 100; __local0__ = {"foo": data.foo[i]} } +func (c *Compiler) rewriteRefsInHead() { + f := newEqualityFactory(c.localvargen) + for _, name := range c.sorted { + mod := c.Modules[name] + WalkRules(mod, func(rule *Rule) bool { + if requiresEval(rule.Head.Key) { + expr := f.Generate(rule.Head.Key) + rule.Head.Key = expr.Operand(0) + rule.Body.Append(expr) + } + if requiresEval(rule.Head.Value) { + expr := f.Generate(rule.Head.Value) + rule.Head.Value = expr.Operand(0) + rule.Body.Append(expr) + } + for i := 0; i < len(rule.Head.Args); i++ { + if requiresEval(rule.Head.Args[i]) { + expr := f.Generate(rule.Head.Args[i]) + rule.Head.Args[i] = expr.Operand(0) + rule.Body.Append(expr) + } + } + return false + }) + } +} + +func (c *Compiler) rewriteEquals() { + modified := false + for _, name := range c.sorted { + mod := c.Modules[name] + modified = rewriteEquals(mod) || modified + } + if modified { + c.Required.addBuiltinSorted(Equal) + } +} + +func (c *Compiler) rewriteDynamicTerms() { + f := newEqualityFactory(c.localvargen) + for _, name := range c.sorted { + mod := c.Modules[name] + WalkRules(mod, func(rule *Rule) bool { + rule.Body = rewriteDynamics(f, rule.Body) + return false + }) + } +} + +// rewriteTestRuleEqualities rewrites equality expressions in test rule bodies to create local vars for statements that would otherwise +// not have their values captured through tracing, such as refs and comprehensions not unified/assigned to a local var. +// For example, given the following module: +// +// package test +// +// p.q contains v if { +// some v in numbers.range(1, 3) +// } +// +// p.r := "foo" +// +// test_rule { +// p == { +// "q": {4, 5, 6} +// } +// } +// +// `p` in `test_rule` resolves to `data.test.p`, which won't be an entry in the virtual-cache and must therefore be calculated after-the-fact. +// If `p` isn't captured in a local var, there is no trivial way to retrieve its value for test reporting. 
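+//
+// After the rewrite, the equality operand is captured roughly as follows
+// (generated variable names are illustrative):
+//
+//	test_rule {
+//	    __local0__ = p
+//	    __local0__ == {"q": {4, 5, 6}}
+//	}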
+func (c *Compiler) rewriteTestRuleEqualities() { + if !c.rewriteTestRulesForTracing { + return + } + + f := newEqualityFactory(c.localvargen) + for _, name := range c.sorted { + mod := c.Modules[name] + WalkRules(mod, func(rule *Rule) bool { + if strings.HasPrefix(string(rule.Head.Name), "test_") { + rule.Body = rewriteTestEqualities(f, rule.Body) + } + return false + }) + } +} + +func (c *Compiler) parseMetadataBlocks() { + // Only parse annotations if rego.metadata built-ins are called + regoMetadataCalled := false + for _, name := range c.sorted { + mod := c.Modules[name] + WalkExprs(mod, func(expr *Expr) bool { + if isRegoMetadataChainCall(expr) || isRegoMetadataRuleCall(expr) { + regoMetadataCalled = true + } + return regoMetadataCalled + }) + + if regoMetadataCalled { + break + } + } + + if regoMetadataCalled { + // NOTE: Possible optimization: only parse annotations for modules on the path of rego.metadata-calling module + for _, name := range c.sorted { + mod := c.Modules[name] + + if len(mod.Annotations) == 0 { + var errs Errors + mod.Annotations, errs = parseAnnotations(mod.Comments) + errs = append(errs, attachAnnotationsNodes(mod)...) + for _, err := range errs { + c.err(err) + } + + attachRuleAnnotations(mod) + } + } + } +} + +func (c *Compiler) rewriteRegoMetadataCalls() { + eqFactory := newEqualityFactory(c.localvargen) + + _, chainFuncAllowed := c.builtins[RegoMetadataChain.Name] + _, ruleFuncAllowed := c.builtins[RegoMetadataRule.Name] + + for _, name := range c.sorted { + mod := c.Modules[name] + + WalkRules(mod, func(rule *Rule) bool { + var firstChainCall *Expr + var firstRuleCall *Expr + + WalkExprs(rule, func(expr *Expr) bool { + if chainFuncAllowed && firstChainCall == nil && isRegoMetadataChainCall(expr) { + firstChainCall = expr + } else if ruleFuncAllowed && firstRuleCall == nil && isRegoMetadataRuleCall(expr) { + firstRuleCall = expr + } + return firstChainCall != nil && firstRuleCall != nil + }) + + chainCalled := firstChainCall != nil + ruleCalled := firstRuleCall != nil + + if chainCalled || ruleCalled { + body := make(Body, 0, len(rule.Body)+2) + + var metadataChainVar Var + if chainCalled { + // Create and inject metadata chain for rule + + chain, err := createMetadataChain(c.annotationSet.Chain(rule)) + if err != nil { + c.err(err) + return false + } + + chain.Location = firstChainCall.Location + eq := eqFactory.Generate(chain) + metadataChainVar = eq.Operands()[0].Value.(Var) + body.Append(eq) + } + + var metadataRuleVar Var + if ruleCalled { + // Create and inject metadata for rule + + var metadataRuleTerm *Term + + a := getPrimaryRuleAnnotations(c.annotationSet, rule) + if a != nil { + annotObj, err := a.toObject() + if err != nil { + c.err(err) + return false + } + metadataRuleTerm = NewTerm(*annotObj) + } else { + // If rule has no annotations, assign an empty object + metadataRuleTerm = ObjectTerm() + } + + metadataRuleTerm.Location = firstRuleCall.Location + eq := eqFactory.Generate(metadataRuleTerm) + metadataRuleVar = eq.Operands()[0].Value.(Var) + body.Append(eq) + } + + for _, expr := range rule.Body { + body.Append(expr) + } + rule.Body = body + + vis := func(b Body) bool { + for _, err := range rewriteRegoMetadataCalls(&metadataChainVar, &metadataRuleVar, b, &c.RewrittenVars) { + c.err(err) + } + return false + } + WalkBodies(rule.Head, vis) + WalkBodies(rule.Body, vis) + } + + return false + }) + } +} + +func getPrimaryRuleAnnotations(as *AnnotationSet, rule *Rule) *Annotations { + annots := as.GetRuleScope(rule) + + if len(annots) == 0 { + 
return nil + } + + // Sort by annotation location; chain must start with annotations declared closest to rule, then going outward + slices.SortStableFunc(annots, func(a, b *Annotations) int { + return -a.Location.Compare(b.Location) + }) + + return annots[0] +} + +func rewriteRegoMetadataCalls(metadataChainVar *Var, metadataRuleVar *Var, body Body, rewrittenVars *map[Var]Var) Errors { + var errs Errors + + WalkClosures(body, func(x interface{}) bool { + switch x := x.(type) { + case *ArrayComprehension: + errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars) + case *SetComprehension: + errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars) + case *ObjectComprehension: + errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars) + case *Every: + errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars) + } + return true + }) + + for i := range body { + expr := body[i] + var metadataVar Var + + if metadataChainVar != nil && isRegoMetadataChainCall(expr) { + metadataVar = *metadataChainVar + } else if metadataRuleVar != nil && isRegoMetadataRuleCall(expr) { + metadataVar = *metadataRuleVar + } else { + continue + } + + // NOTE(johanfylling): An alternative strategy would be to walk the body and replace all operands[0] + // usages with *metadataChainVar + operands := expr.Operands() + var newExpr *Expr + if len(operands) > 0 { // There is an output var to rewrite + rewrittenVar := operands[0] + newExpr = Equality.Expr(rewrittenVar, NewTerm(metadataVar)) + } else { // No output var, just rewrite expr to metadataVar + newExpr = NewExpr(NewTerm(metadataVar)) + } + + newExpr.Generated = true + newExpr.Location = expr.Location + body.Set(newExpr, i) + } + + return errs +} + +var regoMetadataChainRef = RegoMetadataChain.Ref() +var regoMetadataRuleRef = RegoMetadataRule.Ref() + +func isRegoMetadataChainCall(x *Expr) bool { + return x.IsCall() && x.Operator().Equal(regoMetadataChainRef) +} + +func isRegoMetadataRuleCall(x *Expr) bool { + return x.IsCall() && x.Operator().Equal(regoMetadataRuleRef) +} + +func createMetadataChain(chain []*AnnotationsRef) (*Term, *Error) { + + metaArray := NewArray() + for _, link := range chain { + p := link.Path.toArray(). + Slice(1, -1) // Dropping leading 'data' element of path + obj := NewObject( + Item(pathTerm, NewTerm(p)), + ) + if link.Annotations != nil { + annotObj, err := link.Annotations.toObject() + if err != nil { + return nil, err + } + obj.Insert(annotationsTerm, NewTerm(*annotObj)) + } + metaArray = metaArray.Append(NewTerm(obj)) + } + + return NewTerm(metaArray), nil +} + +func (c *Compiler) rewriteLocalVars() { + + var assignment bool + + for _, name := range c.sorted { + mod := c.Modules[name] + gen := c.localvargen + + WalkRules(mod, func(rule *Rule) bool { + argsStack := newLocalDeclaredVars() + + args := NewVarVisitor() + if c.strict { + args.Walk(rule.Head.Args) + } + unusedArgs := args.Vars() + + c.rewriteLocalArgVars(gen, argsStack, rule) + + // Rewrite local vars in each else-branch of the rule. + // Note: this is done instead of a walk so that we can capture any unused function arguments + // across else-branches. 
+			for rule := rule; rule != nil; rule = rule.Else {
+				stack, errs := c.rewriteLocalVarsInRule(rule, unusedArgs, argsStack, gen)
+				if stack.assignment {
+					assignment = true
+				}
+
+				for arg := range unusedArgs {
+					if stack.Count(arg) > 1 {
+						delete(unusedArgs, arg)
+					}
+				}
+
+				for _, err := range errs {
+					c.err(err)
+				}
+			}
+
+			if c.strict {
+				// Report an error for each unused function argument
+				for arg := range unusedArgs {
+					if !arg.IsWildcard() {
+						c.err(NewError(CompileErr, rule.Head.Location, "unused argument %v. (hint: use _ (wildcard variable) instead)", arg))
+					}
+				}
+			}
+
+			return true
+		})
+	}
+
+	if assignment {
+		c.Required.addBuiltinSorted(Assign)
+	}
+}
+
+func (c *Compiler) rewriteLocalVarsInRule(rule *Rule, unusedArgs VarSet, argsStack *localDeclaredVars, gen *localVarGenerator) (*localDeclaredVars, Errors) {
+	// Rewrite assignments contained in head of rule. Assignments can
+	// occur in rule head if they're inside a comprehension. Note,
+	// assigned vars in comprehensions in the head will be rewritten
+	// first to preserve scoping rules. For example:
+	//
+	// p = [x | x := 1] { x := 2 } becomes p = [__local0__ | __local0__ = 1] { __local1__ = 2 }
+	//
+	// This behaviour is consistent with scoping inside the body. For example:
+	//
+	// p = xs { x := 2; xs = [x | x := 1] } becomes p = xs { __local0__ = 2; xs = [__local1__ | __local1__ = 1] }
+	nestedXform := &rewriteNestedHeadVarLocalTransform{
+		gen:           gen,
+		RewrittenVars: c.RewrittenVars,
+		strict:        c.strict,
+	}
+
+	NewGenericVisitor(nestedXform.Visit).Walk(rule.Head)
+
+	for _, err := range nestedXform.errs {
+		c.err(err)
+	}
+
+	// Rewrite assignments in body.
+	used := NewVarSet()
+
+	for _, t := range rule.Head.Ref()[1:] {
+		used.Update(t.Vars())
+	}
+
+	if rule.Head.Key != nil {
+		used.Update(rule.Head.Key.Vars())
+	}
+
+	if rule.Head.Value != nil {
+		valueVars := rule.Head.Value.Vars()
+		used.Update(valueVars)
+		for arg := range unusedArgs {
+			if valueVars.Contains(arg) {
+				delete(unusedArgs, arg)
+			}
+		}
+	}
+
+	stack := argsStack.Copy()
+
+	body, declared, errs := rewriteLocalVars(gen, stack, used, rule.Body, c.strict)
+
+	// For rewritten vars use the collection of all variables that
+	// were in the stack at some point in time.
+	maps.Copy(c.RewrittenVars, stack.rewritten)
+
+	rule.Body = body
+
+	// Rewrite vars in head that refer to locally declared vars in the body.
+ localXform := rewriteHeadVarLocalTransform{declared: declared} + + for i := range rule.Head.Args { + rule.Head.Args[i], _ = transformTerm(localXform, rule.Head.Args[i]) + } + + for i := 1; i < len(rule.Head.Ref()); i++ { + rule.Head.Reference[i], _ = transformTerm(localXform, rule.Head.Ref()[i]) + } + if rule.Head.Key != nil { + rule.Head.Key, _ = transformTerm(localXform, rule.Head.Key) + } + + if rule.Head.Value != nil { + rule.Head.Value, _ = transformTerm(localXform, rule.Head.Value) + } + return stack, errs +} + +type rewriteNestedHeadVarLocalTransform struct { + gen *localVarGenerator + errs Errors + RewrittenVars map[Var]Var + strict bool +} + +func (xform *rewriteNestedHeadVarLocalTransform) Visit(x interface{}) bool { + + if term, ok := x.(*Term); ok { + + stop := false + stack := newLocalDeclaredVars() + + switch x := term.Value.(type) { + case *object: + cpy, _ := x.Map(func(k, v *Term) (*Term, *Term, error) { + kcpy := k.Copy() + NewGenericVisitor(xform.Visit).Walk(kcpy) + vcpy := v.Copy() + NewGenericVisitor(xform.Visit).Walk(vcpy) + return kcpy, vcpy, nil + }) + term.Value = cpy + stop = true + case *set: + cpy, _ := x.Map(func(v *Term) (*Term, error) { + vcpy := v.Copy() + NewGenericVisitor(xform.Visit).Walk(vcpy) + return vcpy, nil + }) + term.Value = cpy + stop = true + case *ArrayComprehension: + xform.errs = rewriteDeclaredVarsInArrayComprehension(xform.gen, stack, x, xform.errs, xform.strict) + stop = true + case *SetComprehension: + xform.errs = rewriteDeclaredVarsInSetComprehension(xform.gen, stack, x, xform.errs, xform.strict) + stop = true + case *ObjectComprehension: + xform.errs = rewriteDeclaredVarsInObjectComprehension(xform.gen, stack, x, xform.errs, xform.strict) + stop = true + } + + maps.Copy(xform.RewrittenVars, stack.rewritten) + + return stop + } + + return false +} + +type rewriteHeadVarLocalTransform struct { + declared map[Var]Var +} + +func (xform rewriteHeadVarLocalTransform) Transform(x interface{}) (interface{}, error) { + if v, ok := x.(Var); ok { + if gv, ok := xform.declared[v]; ok { + return gv, nil + } + } + return x, nil +} + +func (c *Compiler) rewriteLocalArgVars(gen *localVarGenerator, stack *localDeclaredVars, rule *Rule) { + + vis := &ruleArgLocalRewriter{ + stack: stack, + gen: gen, + } + + for i := range rule.Head.Args { + Walk(vis, rule.Head.Args[i]) + } + + for i := range vis.errs { + c.err(vis.errs[i]) + } +} + +type ruleArgLocalRewriter struct { + stack *localDeclaredVars + gen *localVarGenerator + errs []*Error +} + +func (vis *ruleArgLocalRewriter) Visit(x interface{}) Visitor { + + t, ok := x.(*Term) + if !ok { + return vis + } + + switch v := t.Value.(type) { + case Var: + gv, ok := vis.stack.Declared(v) + if ok { + vis.stack.Seen(v) + } else { + gv = vis.gen.Generate() + vis.stack.Insert(v, gv, argVar) + } + t.Value = gv + return nil + case *object: + if cpy, err := v.Map(func(k, v *Term) (*Term, *Term, error) { + vcpy := v.Copy() + Walk(vis, vcpy) + return k, vcpy, nil + }); err != nil { + vis.errs = append(vis.errs, NewError(CompileErr, t.Location, err.Error())) //nolint:govet + } else { + t.Value = cpy + } + return nil + case Null, Boolean, Number, String, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Set: + // Scalars are no-ops. Comprehensions are handled above. Sets must not + // contain variables. + return nil + case Call: + vis.errs = append(vis.errs, NewError(CompileErr, t.Location, "rule arguments cannot contain calls")) + return nil + default: + // Recurse on refs and arrays. 
Any embedded
+		// variables can be rewritten.
+		return vis
+	}
+}
+
+func (c *Compiler) rewriteWithModifiers() {
+	f := newEqualityFactory(c.localvargen)
+	for _, name := range c.sorted {
+		mod := c.Modules[name]
+		t := NewGenericTransformer(func(x interface{}) (interface{}, error) {
+			body, ok := x.(Body)
+			if !ok {
+				return x, nil
+			}
+			body, err := rewriteWithModifiersInBody(c, c.unsafeBuiltinsMap, f, body)
+			if err != nil {
+				c.err(err)
+			}
+
+			return body, nil
+		})
+		_, _ = Transform(t, mod) // ignore error
+	}
+}
+
+func (c *Compiler) setModuleTree() {
+	c.ModuleTree = NewModuleTree(c.Modules)
+}
+
+func (c *Compiler) setRuleTree() {
+	c.RuleTree = NewRuleTree(c.ModuleTree)
+}
+
+func (c *Compiler) setGraph() {
+	list := func(r Ref) []*Rule {
+		return c.GetRulesDynamicWithOpts(r, RulesOptions{IncludeHiddenModules: true})
+	}
+	c.Graph = NewGraph(c.Modules, list)
+}
+
+type queryCompiler struct {
+	compiler              *Compiler
+	qctx                  *QueryContext
+	typeEnv               *TypeEnv
+	rewritten             map[Var]Var
+	after                 map[string][]QueryCompilerStageDefinition
+	unsafeBuiltins        map[string]struct{}
+	comprehensionIndices  map[*Term]*ComprehensionIndex
+	enablePrintStatements bool
+}
+
+func newQueryCompiler(compiler *Compiler) QueryCompiler {
+	qc := &queryCompiler{
+		compiler:             compiler,
+		qctx:                 nil,
+		after:                map[string][]QueryCompilerStageDefinition{},
+		comprehensionIndices: map[*Term]*ComprehensionIndex{},
+	}
+	return qc
+}
+
+func (qc *queryCompiler) WithStrict(strict bool) QueryCompiler {
+	qc.compiler.WithStrict(strict)
+	return qc
+}
+
+func (qc *queryCompiler) WithEnablePrintStatements(yes bool) QueryCompiler {
+	qc.enablePrintStatements = yes
+	return qc
+}
+
+func (qc *queryCompiler) WithContext(qctx *QueryContext) QueryCompiler {
+	qc.qctx = qctx
+	return qc
+}
+
+func (qc *queryCompiler) WithStageAfter(after string, stage QueryCompilerStageDefinition) QueryCompiler {
+	qc.after[after] = append(qc.after[after], stage)
+	return qc
+}
+
+func (qc *queryCompiler) WithUnsafeBuiltins(unsafe map[string]struct{}) QueryCompiler {
+	qc.unsafeBuiltins = unsafe
+	return qc
+}
+
+func (qc *queryCompiler) RewrittenVars() map[Var]Var {
+	return qc.rewritten
+}
+
+func (qc *queryCompiler) ComprehensionIndex(term *Term) *ComprehensionIndex {
+	if result, ok := qc.comprehensionIndices[term]; ok {
+		return result
+	} else if result, ok := qc.compiler.comprehensionIndices[term]; ok {
+		return result
+	}
+	return nil
+}
+
+func (qc *queryCompiler) runStage(metricName string, qctx *QueryContext, query Body, s func(*QueryContext, Body) (Body, error)) (Body, error) {
+	if qc.compiler.metrics != nil {
+		qc.compiler.metrics.Timer(metricName).Start()
+		defer qc.compiler.metrics.Timer(metricName).Stop()
+	}
+	return s(qctx, query)
+}
+
+func (qc *queryCompiler) runStageAfter(metricName string, query Body, s QueryCompilerStage) (Body, error) {
+	if qc.compiler.metrics != nil {
+		qc.compiler.metrics.Timer(metricName).Start()
+		defer qc.compiler.metrics.Timer(metricName).Stop()
+	}
+	return s(qc, query)
+}
+
+type queryStage = struct {
+	name       string
+	metricName string
+	f          func(*QueryContext, Body) (Body, error)
+}
+
+func (qc *queryCompiler) Compile(query Body) (Body, error) {
+	if len(query) == 0 {
+		return nil, Errors{NewError(CompileErr, nil, "empty query cannot be compiled")}
+	}
+
+	query = query.Copy()
+
+	stages := []queryStage{
+		{"CheckKeywordOverrides", "query_compile_stage_check_keyword_overrides", qc.checkKeywordOverrides},
+		{"ResolveRefs", "query_compile_stage_resolve_refs", qc.resolveRefs},
+		{"RewriteLocalVars",
"query_compile_stage_rewrite_local_vars", qc.rewriteLocalVars}, + {"CheckVoidCalls", "query_compile_stage_check_void_calls", qc.checkVoidCalls}, + {"RewritePrintCalls", "query_compile_stage_rewrite_print_calls", qc.rewritePrintCalls}, + {"RewriteExprTerms", "query_compile_stage_rewrite_expr_terms", qc.rewriteExprTerms}, + {"RewriteComprehensionTerms", "query_compile_stage_rewrite_comprehension_terms", qc.rewriteComprehensionTerms}, + {"RewriteWithValues", "query_compile_stage_rewrite_with_values", qc.rewriteWithModifiers}, + {"CheckUndefinedFuncs", "query_compile_stage_check_undefined_funcs", qc.checkUndefinedFuncs}, + {"CheckSafety", "query_compile_stage_check_safety", qc.checkSafety}, + {"RewriteDynamicTerms", "query_compile_stage_rewrite_dynamic_terms", qc.rewriteDynamicTerms}, + {"CheckTypes", "query_compile_stage_check_types", qc.checkTypes}, + {"CheckUnsafeBuiltins", "query_compile_stage_check_unsafe_builtins", qc.checkUnsafeBuiltins}, + {"CheckDeprecatedBuiltins", "query_compile_stage_check_deprecated_builtins", qc.checkDeprecatedBuiltins}, + } + if qc.compiler.evalMode == EvalModeTopdown { + stages = append(stages, queryStage{"BuildComprehensionIndex", "query_compile_stage_build_comprehension_index", qc.buildComprehensionIndices}) + } + + qctx := qc.qctx.Copy() + + for _, s := range stages { + var err error + query, err = qc.runStage(s.metricName, qctx, query, s.f) + if err != nil { + return nil, qc.applyErrorLimit(err) + } + for _, s := range qc.after[s.name] { + query, err = qc.runStageAfter(s.MetricName, query, s.Stage) + if err != nil { + return nil, qc.applyErrorLimit(err) + } + } + } + + return query, nil +} + +func (qc *queryCompiler) TypeEnv() *TypeEnv { + return qc.typeEnv +} + +func (qc *queryCompiler) applyErrorLimit(err error) error { + var errs Errors + if errors.As(err, &errs) { + if qc.compiler.maxErrs > 0 && len(errs) > qc.compiler.maxErrs { + err = append(errs[:qc.compiler.maxErrs], errLimitReached) + } + } + return err +} + +func (qc *queryCompiler) checkKeywordOverrides(_ *QueryContext, body Body) (Body, error) { + if qc.compiler.strict { + if errs := checkRootDocumentOverrides(body); len(errs) > 0 { + return nil, errs + } + } + return body, nil +} + +func (qc *queryCompiler) resolveRefs(qctx *QueryContext, body Body) (Body, error) { + + var globals map[Var]*usedRef + + if qctx != nil { + pkg := qctx.Package + // Query compiler ought to generate a package if one was not provided and one or more imports were provided. 
+ // The generated package name could even be an empty string to avoid conflicts (it doesn't have to be valid syntactically) + if pkg == nil && len(qctx.Imports) > 0 { + pkg = &Package{Path: RefTerm(VarTerm("")).Value.(Ref)} + } + if pkg != nil { + var ruleExports []Ref + rules := qc.compiler.getExports() + if exist, ok := rules.Get(pkg.Path); ok { + ruleExports = exist + } + + globals = getGlobals(qctx.Package, ruleExports, qctx.Imports) + qctx.Imports = nil + } + } + + ignore := &declaredVarStack{declaredVars(body)} + + return resolveRefsInBody(globals, ignore, body), nil +} + +func (*queryCompiler) rewriteComprehensionTerms(_ *QueryContext, body Body) (Body, error) { + gen := newLocalVarGenerator("q", body) + f := newEqualityFactory(gen) + node, err := rewriteComprehensionTerms(f, body) + if err != nil { + return nil, err + } + return node.(Body), nil +} + +func (*queryCompiler) rewriteDynamicTerms(_ *QueryContext, body Body) (Body, error) { + gen := newLocalVarGenerator("q", body) + f := newEqualityFactory(gen) + return rewriteDynamics(f, body), nil +} + +func (*queryCompiler) rewriteExprTerms(_ *QueryContext, body Body) (Body, error) { + gen := newLocalVarGenerator("q", body) + return rewriteExprTermsInBody(gen, body), nil +} + +func (qc *queryCompiler) rewriteLocalVars(_ *QueryContext, body Body) (Body, error) { + gen := newLocalVarGenerator("q", body) + stack := newLocalDeclaredVars() + body, _, err := rewriteLocalVars(gen, stack, nil, body, qc.compiler.strict) + if len(err) != 0 { + return nil, err + } + + // The vars returned during the rewrite will include all seen vars, + // even if they're not declared with an assignment operation. We don't + // want to include these inside the rewritten set though. + qc.rewritten = maps.Clone(stack.rewritten) + + return body, nil +} + +func (qc *queryCompiler) rewritePrintCalls(_ *QueryContext, body Body) (Body, error) { + if !qc.enablePrintStatements { + _, cpy := erasePrintCallsInBody(body) + return cpy, nil + } + gen := newLocalVarGenerator("q", body) + if _, errs := rewritePrintCalls(gen, qc.compiler.GetArity, ReservedVars, body); len(errs) > 0 { + return nil, errs + } + return body, nil +} + +func (qc *queryCompiler) checkVoidCalls(_ *QueryContext, body Body) (Body, error) { + if errs := checkVoidCalls(qc.compiler.TypeEnv, body); len(errs) > 0 { + return nil, errs + } + return body, nil +} + +func (qc *queryCompiler) checkUndefinedFuncs(_ *QueryContext, body Body) (Body, error) { + if errs := checkUndefinedFuncs(qc.compiler.TypeEnv, body, qc.compiler.GetArity, qc.rewritten); len(errs) > 0 { + return nil, errs + } + return body, nil +} + +func (qc *queryCompiler) checkSafety(_ *QueryContext, body Body) (Body, error) { + safe := ReservedVars.Copy() + reordered, unsafe := reorderBodyForSafety(qc.compiler.builtins, qc.compiler.GetArity, safe, body) + if errs := safetyErrorSlice(unsafe, qc.RewrittenVars()); len(errs) > 0 { + return nil, errs + } + return reordered, nil +} + +func (qc *queryCompiler) checkTypes(_ *QueryContext, body Body) (Body, error) { + var errs Errors + checker := newTypeChecker(). + WithSchemaSet(qc.compiler.schemaSet). + WithInputType(qc.compiler.inputType). 
+		WithVarRewriter(rewriteVarsInRef(qc.rewritten, qc.compiler.RewrittenVars))
+	qc.typeEnv, errs = checker.CheckBody(qc.compiler.TypeEnv, body)
+	if len(errs) > 0 {
+		return nil, errs
+	}
+
+	return body, nil
+}
+
+func (qc *queryCompiler) checkUnsafeBuiltins(_ *QueryContext, body Body) (Body, error) {
+	errs := checkUnsafeBuiltins(qc.unsafeBuiltinsMap(), body)
+	if len(errs) > 0 {
+		return nil, errs
+	}
+	return body, nil
+}
+
+func (qc *queryCompiler) unsafeBuiltinsMap() map[string]struct{} {
+	if qc.unsafeBuiltins != nil {
+		return qc.unsafeBuiltins
+	}
+	return qc.compiler.unsafeBuiltinsMap
+}
+
+func (qc *queryCompiler) checkDeprecatedBuiltins(_ *QueryContext, body Body) (Body, error) {
+	if qc.compiler.strict {
+		errs := checkDeprecatedBuiltins(qc.compiler.deprecatedBuiltinsMap, body)
+		if len(errs) > 0 {
+			return nil, errs
+		}
+	}
+	return body, nil
+}
+
+func (qc *queryCompiler) rewriteWithModifiers(_ *QueryContext, body Body) (Body, error) {
+	f := newEqualityFactory(newLocalVarGenerator("q", body))
+	body, err := rewriteWithModifiersInBody(qc.compiler, qc.unsafeBuiltinsMap(), f, body)
+	if err != nil {
+		return nil, Errors{err}
+	}
+	return body, nil
+}
+
+func (qc *queryCompiler) buildComprehensionIndices(_ *QueryContext, body Body) (Body, error) {
+	// NOTE(tsandall): The query compiler does not have a metrics object so we
+	// cannot record index metrics currently.
+	_ = buildComprehensionIndices(qc.compiler.debug, qc.compiler.GetArity, ReservedVars, qc.RewrittenVars(), body, qc.comprehensionIndices)
+	return body, nil
+}
+
+// ComprehensionIndex specifies how the comprehension term can be indexed. The keys
+// tell the evaluator what variables to use for indexing. In the future, the index
+// could be expanded with more information that would allow the evaluator to index
+// a larger fragment of comprehensions (e.g., by closing over variables in the outer
+// query.)
+type ComprehensionIndex struct {
+	Term *Term
+	Keys []*Term
+}
+
+func (ci *ComprehensionIndex) String() string {
+	if ci == nil {
+		return "<comprehension index: none>"
+	}
+	return fmt.Sprintf("<comprehension index: keys: %v>", NewArray(ci.Keys...))
+}
+
+func buildComprehensionIndices(dbg debug.Debug, arity func(Ref) int, candidates VarSet, rwVars map[Var]Var, node interface{}, result map[*Term]*ComprehensionIndex) uint64 {
+	var n uint64
+	cpy := candidates.Copy()
+	WalkBodies(node, func(b Body) bool {
+		for _, expr := range b {
+			index := getComprehensionIndex(dbg, arity, cpy, rwVars, expr)
+			if index != nil {
+				result[index.Term] = index
+				n++
+			}
+			// Any variables appearing in the expressions leading up to the comprehension
+			// are fair-game to be used as index keys.
+			cpy.Update(expr.Vars(VarVisitorParams{SkipClosures: true, SkipRefCallHead: true}))
+		}
+		return false
+	})
+	return n
+}
+
+func getComprehensionIndex(dbg debug.Debug, arity func(Ref) int, candidates VarSet, rwVars map[Var]Var, expr *Expr) *ComprehensionIndex {
+
+	// Ignore everything except <var> = <comprehension> expressions. Extract
+	// the comprehension term from the expression.
+	if !expr.IsEquality() || expr.Negated || len(expr.With) > 0 {
+		// No debug message, these are assumed to be known hindrances
+ return nil + } + + var term *Term + + lhs, rhs := expr.Operand(0), expr.Operand(1) + + if _, ok := lhs.Value.(Var); ok && IsComprehension(rhs.Value) { + term = rhs + } else if _, ok := rhs.Value.(Var); ok && IsComprehension(lhs.Value) { + term = lhs + } + + if term == nil { + // no debug for this, it's the ordinary "nothing to do here" case + return nil + } + + // Ignore comprehensions that contain expressions that close over variables + // in the outer body if those variables are not also output variables in the + // comprehension body. In other words, ignore comprehensions that we cannot + // safely evaluate without bindings from the outer body. For example: + // + // x = [1] + // [true | data.y[z] = x] # safe to evaluate w/o outer body + // [true | data.y[z] = x[0]] # NOT safe to evaluate because 'x' would be unsafe. + // + // By identifying output variables in the body we also know what to index on by + // intersecting with candidate variables from the outer query. + // + // For example: + // + // x = data.foo[_] + // _ = [y | data.bar[y] = x] # index on 'x' + // + // This query goes from O(data.foo*data.bar) to O(data.foo+data.bar). + var body Body + + switch x := term.Value.(type) { + case *ArrayComprehension: + body = x.Body + case *SetComprehension: + body = x.Body + case *ObjectComprehension: + body = x.Body + } + + outputs := outputVarsForBody(body, arity, ReservedVars) + unsafe := body.Vars(SafetyCheckVisitorParams).Diff(outputs).Diff(ReservedVars) + + if len(unsafe) > 0 { + dbg.Printf("%s: comprehension index: unsafe vars: %v", expr.Location, unsafe) + return nil + } + + // Similarly, ignore comprehensions that contain references with output variables + // that intersect with the candidates. Indexing these comprehensions could worsen + // performance. + regressionVis := newComprehensionIndexRegressionCheckVisitor(candidates) + regressionVis.Walk(body) + if regressionVis.worse { + dbg.Printf("%s: comprehension index: output vars intersect candidates", expr.Location) + return nil + } + + // Check if any nested comprehensions close over candidates. If any intersection is found + // the comprehension cannot be cached because it would require closing over the candidates + // which the evaluator does not support today. + nestedVis := newComprehensionIndexNestedCandidateVisitor(candidates) + nestedVis.Walk(body) + if nestedVis.found { + dbg.Printf("%s: comprehension index: nested comprehensions close over candidates", expr.Location) + return nil + } + + // Make a sorted set of variable names that will serve as the index key set. + // Sort to ensure deterministic indexing. In future this could be relaxed + // if we can decide that one ordering is better than another. If the set is + // empty, there is no indexing to do. 
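+	// For instance, with candidates {x, y} and comprehension output vars
+	// {y, z}, the key set computed below would be {y} (values illustrative).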
+ indexVars := candidates.Intersect(outputs) + if len(indexVars) == 0 { + dbg.Printf("%s: comprehension index: no index vars", expr.Location) + return nil + } + + result := make([]*Term, 0, len(indexVars)) + + for v := range indexVars { + result = append(result, NewTerm(v)) + } + + slices.SortFunc(result, TermValueCompare) + + debugRes := make([]*Term, len(result)) + for i, r := range result { + if o, ok := rwVars[r.Value.(Var)]; ok { + debugRes[i] = NewTerm(o) + } else { + debugRes[i] = r + } + } + dbg.Printf("%s: comprehension index: built with keys: %v", expr.Location, debugRes) + return &ComprehensionIndex{Term: term, Keys: result} +} + +type comprehensionIndexRegressionCheckVisitor struct { + candidates VarSet + seen VarSet + worse bool +} + +// TODO(tsandall): Improve this so that users can either supply this list explicitly +// or the information is maintained on the built-in function declaration. What we really +// need to know is whether the built-in function allows callers to push down output +// values or not. It's unlikely that anything outside of OPA does this today so this +// solution is fine for now. +var comprehensionIndexBlacklist = map[string]int{ + WalkBuiltin.Name: len(WalkBuiltin.Decl.FuncArgs().Args), +} + +func newComprehensionIndexRegressionCheckVisitor(candidates VarSet) *comprehensionIndexRegressionCheckVisitor { + return &comprehensionIndexRegressionCheckVisitor{ + candidates: candidates, + seen: NewVarSet(), + } +} + +func (vis *comprehensionIndexRegressionCheckVisitor) Walk(x interface{}) { + NewGenericVisitor(vis.visit).Walk(x) +} + +func (vis *comprehensionIndexRegressionCheckVisitor) visit(x interface{}) bool { + if !vis.worse { + switch x := x.(type) { + case *Expr: + operands := x.Operands() + if pos := comprehensionIndexBlacklist[x.Operator().String()]; pos > 0 && pos < len(operands) { + vis.assertEmptyIntersection(operands[pos].Vars()) + } + case Ref: + vis.assertEmptyIntersection(x.OutputVars()) + case Var: + vis.seen.Add(x) + // Always skip comprehensions. We do not have to visit their bodies here. + case *ArrayComprehension, *SetComprehension, *ObjectComprehension: + return true + } + } + return vis.worse +} + +func (vis *comprehensionIndexRegressionCheckVisitor) assertEmptyIntersection(vs VarSet) { + for v := range vs { + if vis.candidates.Contains(v) && !vis.seen.Contains(v) { + vis.worse = true + return + } + } +} + +type comprehensionIndexNestedCandidateVisitor struct { + candidates VarSet + found bool +} + +func newComprehensionIndexNestedCandidateVisitor(candidates VarSet) *comprehensionIndexNestedCandidateVisitor { + return &comprehensionIndexNestedCandidateVisitor{ + candidates: candidates, + } +} + +func (vis *comprehensionIndexNestedCandidateVisitor) Walk(x interface{}) { + NewGenericVisitor(vis.visit).Walk(x) +} + +func (vis *comprehensionIndexNestedCandidateVisitor) visit(x interface{}) bool { + + if vis.found { + return true + } + + if v, ok := x.(Value); ok && IsComprehension(v) { + varVis := NewVarVisitor().WithParams(VarVisitorParams{SkipRefHead: true}) + varVis.Walk(v) + vis.found = len(varVis.Vars().Intersect(vis.candidates)) > 0 + return true + } + + return false +} + +// ModuleTreeNode represents a node in the module tree. The module +// tree is keyed by the package path. 
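+//
+// For example, modules with packages data.a.b and data.a.c share the nodes
+// keyed "data" and "a"; the latter has children keyed "b" and "c" (package
+// names illustrative).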
+type ModuleTreeNode struct {
+	Key      Value
+	Modules  []*Module
+	Children map[Value]*ModuleTreeNode
+	Hide     bool
+}
+
+func (n *ModuleTreeNode) String() string {
+	var rules []string
+	for _, m := range n.Modules {
+		for _, r := range m.Rules {
+			rules = append(rules, r.Head.String())
+		}
+	}
+	return fmt.Sprintf("<ModuleTreeNode key:%v children:%v rules:%v hide:%v>", n.Key, n.Children, rules, n.Hide)
+}
+
+// NewModuleTree returns a new ModuleTreeNode that represents the root
+// of the module tree populated with the given modules.
+func NewModuleTree(mods map[string]*Module) *ModuleTreeNode {
+	root := &ModuleTreeNode{
+		Children: map[Value]*ModuleTreeNode{},
+	}
+	for _, name := range util.KeysSorted(mods) {
+		m := mods[name]
+		node := root
+		for i, x := range m.Package.Path {
+			c, ok := node.Children[x.Value]
+			if !ok {
+				var hide bool
+				if i == 1 && x.Value.Compare(SystemDocumentKey) == 0 {
+					hide = true
+				}
+				c = &ModuleTreeNode{
+					Key:      x.Value,
+					Children: map[Value]*ModuleTreeNode{},
+					Hide:     hide,
+				}
+				node.Children[x.Value] = c
+			}
+			node = c
+		}
+		node.Modules = append(node.Modules, m)
+	}
+	return root
+}
+
+// Size returns the number of modules in the tree.
+func (n *ModuleTreeNode) Size() int {
+	s := len(n.Modules)
+	for _, c := range n.Children {
+		s += c.Size()
+	}
+	return s
+}
+
+// Child returns n's child with key k.
+func (n *ModuleTreeNode) child(k Value) *ModuleTreeNode {
+	switch k.(type) {
+	case String, Var:
+		return n.Children[k]
+	}
+	return nil
+}
+
+// Find dereferences ref along the tree. ref[0] is converted to a String
+// for convenience.
+func (n *ModuleTreeNode) find(ref Ref) (*ModuleTreeNode, Ref) {
+	if v, ok := ref[0].Value.(Var); ok {
+		ref = Ref{StringTerm(string(v))}.Concat(ref[1:])
+	}
+	node := n
+	for i, r := range ref {
+		next := node.child(r.Value)
+		if next == nil {
+			tail := make(Ref, len(ref)-i)
+			tail[0] = VarTerm(string(ref[i].Value.(String)))
+			copy(tail[1:], ref[i+1:])
+			return node, tail
+		}
+		node = next
+	}
+	return node, nil
+}
+
+// DepthFirst performs a depth-first traversal of the module tree rooted at n.
+// If f returns true, traversal will not continue to the children of n.
+func (n *ModuleTreeNode) DepthFirst(f func(*ModuleTreeNode) bool) {
+	if f(n) {
+		return
+	}
+	for _, node := range n.Children {
+		node.DepthFirst(f)
+	}
+}
+
+// TreeNode represents a node in the rule tree. The rule tree is keyed by
+// rule path.
+type TreeNode struct {
+	Key      Value
+	Values   []any
+	Children map[Value]*TreeNode
+	Sorted   []Value
+	Hide     bool
+}
+
+func (n *TreeNode) String() string {
+	return fmt.Sprintf("<TreeNode key:%v values:%v sorted:%v hide:%v>", n.Key, n.Values, n.Sorted, n.Hide)
+}
+
+// NewRuleTree returns a new TreeNode that represents the root
+// of the rule tree populated with the given rules.
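+//
+// Rules are inserted under the ground prefix of their refs; e.g. a rule
+// p.q[x] in package a is stored at the node for data.a.p.q (names
+// illustrative).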
+func NewRuleTree(mtree *ModuleTreeNode) *TreeNode { + root := TreeNode{ + Key: mtree.Key, + } + + mtree.DepthFirst(func(m *ModuleTreeNode) bool { + for _, mod := range m.Modules { + if len(mod.Rules) == 0 { + root.add(mod.Package.Path, nil) + } + for _, rule := range mod.Rules { + root.add(rule.Ref().GroundPrefix(), rule) + } + } + return false + }) + + // ensure that data.system's TreeNode is hidden + node, tail := root.find(DefaultRootRef.Append(NewTerm(SystemDocumentKey))) + if len(tail) == 0 { // found + node.Hide = true + } + + root.DepthFirst(func(x *TreeNode) bool { + x.sort() + return false + }) + + return &root +} + +func (n *TreeNode) add(path Ref, rule *Rule) { + node, tail := n.find(path) + if len(tail) > 0 { + sub := treeNodeFromRef(tail, rule) + if node.Children == nil { + node.Children = make(map[Value]*TreeNode, 1) + } + node.Children[sub.Key] = sub + node.Sorted = append(node.Sorted, sub.Key) + } else if rule != nil { + node.Values = append(node.Values, rule) + } +} + +// Size returns the number of rules in the tree. +func (n *TreeNode) Size() int { + s := len(n.Values) + for _, c := range n.Children { + s += c.Size() + } + return s +} + +// Child returns n's child with key k. +func (n *TreeNode) Child(k Value) *TreeNode { + switch k.(type) { + case Ref, Call: + return nil + default: + return n.Children[k] + } +} + +// Find dereferences ref along the tree +func (n *TreeNode) Find(ref Ref) *TreeNode { + node := n + for _, r := range ref { + node = node.Child(r.Value) + if node == nil { + return nil + } + } + return node +} + +// Iteratively dereferences ref along the node's subtree. +// - If matching fails immediately, the tail will contain the full ref. +// - Partial matching will result in a tail of non-zero length. +// - A complete match will result in a 0 length tail. +func (n *TreeNode) find(ref Ref) (*TreeNode, Ref) { + node := n + for i := range ref { + next := node.Child(ref[i].Value) + if next == nil { + tail := make(Ref, len(ref)-i) + copy(tail, ref[i:]) + return node, tail + } + node = next + } + return node, nil +} + +// DepthFirst performs a depth-first traversal of the rule tree rooted at n. If +// f returns true, traversal will not continue to the children of n. +func (n *TreeNode) DepthFirst(f func(*TreeNode) bool) { + if f(n) { + return + } + for _, node := range n.Children { + node.DepthFirst(f) + } +} + +func (n *TreeNode) sort() { + slices.SortFunc(n.Sorted, Value.Compare) +} + +func treeNodeFromRef(ref Ref, rule *Rule) *TreeNode { + depth := len(ref) - 1 + key := ref[depth].Value + node := &TreeNode{ + Key: key, + Children: nil, + } + if rule != nil { + node.Values = []any{rule} + } + + for i := len(ref) - 2; i >= 0; i-- { + key := ref[i].Value + node = &TreeNode{ + Key: key, + Children: map[Value]*TreeNode{ref[i+1].Value: node}, + Sorted: []Value{ref[i+1].Value}, + } + } + return node +} + +// flattenChildren flattens all children's rule refs into a sorted array. +func (n *TreeNode) flattenChildren() []Ref { + ret := newRefSet() + for _, sub := range n.Children { // we only want the children, so don't use n.DepthFirst() right away + sub.DepthFirst(func(x *TreeNode) bool { + for _, r := range x.Values { + rule := r.(*Rule) + ret.AddPrefix(rule.Ref()) + } + return false + }) + } + + slices.SortFunc(ret.s, RefCompare) + return ret.s +} + +// Graph represents the graph of dependencies between rules. 
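+//
+// An edge from rule a to rule b is recorded when a ref in a resolves to b;
+// e.g. if the body of rule p mentions data.pkg.q, then p depends on every
+// rule defining q (names illustrative).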
+type Graph struct { + adj map[util.T]map[util.T]struct{} + radj map[util.T]map[util.T]struct{} + nodes map[util.T]struct{} + sorted []util.T +} + +// NewGraph returns a new Graph based on modules. The list function must return +// the rules referred to directly by the ref. +func NewGraph(modules map[string]*Module, list func(Ref) []*Rule) *Graph { + + graph := &Graph{ + adj: map[util.T]map[util.T]struct{}{}, + radj: map[util.T]map[util.T]struct{}{}, + nodes: map[util.T]struct{}{}, + sorted: nil, + } + + // Create visitor to walk a rule AST and add edges to the rule graph for + // each dependency. + vis := func(a *Rule) *GenericVisitor { + stop := false + return NewGenericVisitor(func(x interface{}) bool { + switch x := x.(type) { + case Ref: + for _, b := range list(x) { + for node := b; node != nil; node = node.Else { + graph.addDependency(a, node) + } + } + case *Rule: + if stop { + // Do not recurse into else clauses (which will be handled + // by the outer visitor.) + return true + } + stop = true + } + return false + }) + } + + // Walk over all rules, add them to graph, and build adjacency lists. + for _, module := range modules { + WalkRules(module, func(a *Rule) bool { + graph.addNode(a) + vis(a).Walk(a) + return false + }) + } + + return graph +} + +// Dependencies returns the set of rules that x depends on. +func (g *Graph) Dependencies(x util.T) map[util.T]struct{} { + return g.adj[x] +} + +// Dependents returns the set of rules that depend on x. +func (g *Graph) Dependents(x util.T) map[util.T]struct{} { + return g.radj[x] +} + +// Sort returns a slice of rules sorted by dependencies. If a cycle is found, +// ok is set to false. +func (g *Graph) Sort() (sorted []util.T, ok bool) { + if g.sorted != nil { + return g.sorted, true + } + + sorter := &graphSort{ + sorted: make([]util.T, 0, len(g.nodes)), + deps: g.Dependencies, + marked: map[util.T]struct{}{}, + temp: map[util.T]struct{}{}, + } + + for node := range g.nodes { + if !sorter.Visit(node) { + return nil, false + } + } + + g.sorted = sorter.sorted + return g.sorted, true +} + +func (g *Graph) addDependency(u util.T, v util.T) { + + if _, ok := g.nodes[u]; !ok { + g.addNode(u) + } + + if _, ok := g.nodes[v]; !ok { + g.addNode(v) + } + + edges, ok := g.adj[u] + if !ok { + edges = map[util.T]struct{}{} + g.adj[u] = edges + } + + edges[v] = struct{}{} + + edges, ok = g.radj[v] + if !ok { + edges = map[util.T]struct{}{} + g.radj[v] = edges + } + + edges[u] = struct{}{} +} + +func (g *Graph) addNode(n util.T) { + g.nodes[n] = struct{}{} +} + +type graphSort struct { + sorted []util.T + deps func(util.T) map[util.T]struct{} + marked map[util.T]struct{} + temp map[util.T]struct{} +} + +func (sort *graphSort) Marked(node util.T) bool { + _, marked := sort.marked[node] + return marked +} + +func (sort *graphSort) Visit(node util.T) (ok bool) { + if _, ok := sort.temp[node]; ok { + return false + } + if sort.Marked(node) { + return true + } + sort.temp[node] = struct{}{} + for other := range sort.deps(node) { + if !sort.Visit(other) { + return false + } + } + sort.marked[node] = struct{}{} + delete(sort.temp, node) + sort.sorted = append(sort.sorted, node) + return true +} + +// GraphTraversal is a Traversal that understands the dependency graph +type GraphTraversal struct { + graph *Graph + visited map[util.T]struct{} +} + +// NewGraphTraversal returns a Traversal for the dependency graph +func NewGraphTraversal(graph *Graph) *GraphTraversal { + return &GraphTraversal{ + graph: graph, + visited: map[util.T]struct{}{}, + } +} + +// 
Edges lists all dependency connections for a given node +func (g *GraphTraversal) Edges(x util.T) []util.T { + r := []util.T{} + for v := range g.graph.Dependencies(x) { + r = append(r, v) + } + return r +} + +// Visited returns whether a node has been visited, setting a node to visited if not +func (g *GraphTraversal) Visited(u util.T) bool { + _, ok := g.visited[u] + g.visited[u] = struct{}{} + return ok +} + +type unsafePair struct { + Expr *Expr + Vars VarSet +} + +type unsafeVarLoc struct { + Var Var + Loc *Location +} + +type unsafeVars map[*Expr]VarSet + +func (vs unsafeVars) Add(e *Expr, v Var) { + if u, ok := vs[e]; ok { + u[v] = struct{}{} + } else { + vs[e] = VarSet{v: struct{}{}} + } +} + +func (vs unsafeVars) Set(e *Expr, s VarSet) { + vs[e] = s +} + +func (vs unsafeVars) Update(o unsafeVars) { + for k, v := range o { + if _, ok := vs[k]; !ok { + vs[k] = VarSet{} + } + vs[k].Update(v) + } +} + +func (vs unsafeVars) Vars() (result []unsafeVarLoc) { + + locs := map[Var]*Location{} + + // If var appears in multiple sets then pick first by location. + for expr, vars := range vs { + for v := range vars { + if locs[v].Compare(expr.Location) > 0 { + locs[v] = expr.Location + } + } + } + + for v, loc := range locs { + result = append(result, unsafeVarLoc{ + Var: v, + Loc: loc, + }) + } + + slices.SortFunc(result, func(a, b unsafeVarLoc) int { + return a.Loc.Compare(b.Loc) + }) + + return result +} + +func (vs unsafeVars) Slice() (result []unsafePair) { + for expr, vs := range vs { + result = append(result, unsafePair{ + Expr: expr, + Vars: vs, + }) + } + return +} + +// reorderBodyForSafety returns a copy of the body ordered such that +// left to right evaluation of the body will not encounter unbound variables +// in input positions or negated expressions. +// +// Expressions are added to the re-ordered body as soon as they are considered +// safe. If multiple expressions become safe in the same pass, they are added +// in their original order. This results in minimal re-ordering of the body. +// +// If the body cannot be reordered to ensure safety, the second return value +// contains a mapping of expressions to unsafe variables in those expressions. +func reorderBodyForSafety(builtins map[string]*Builtin, arity func(Ref) int, globals VarSet, body Body) (Body, unsafeVars) { + + bodyVars := body.Vars(SafetyCheckVisitorParams) + reordered := make(Body, 0, len(body)) + safe := VarSet{} + unsafe := unsafeVars{} + + for _, e := range body { + for v := range e.Vars(SafetyCheckVisitorParams) { + if globals.Contains(v) { + safe.Add(v) + } else { + unsafe.Add(e, v) + } + } + } + + for { + n := len(reordered) + + for _, e := range body { + if reordered.Contains(e) { + continue + } + + ovs := outputVarsForExpr(e, arity, safe) + + // check closures: is this expression closing over variables that + // haven't been made safe by what's already included in `reordered`? 
+ vs := unsafeVarsInClosures(e) + cv := vs.Intersect(bodyVars).Diff(globals) + uv := cv.Diff(outputVarsForBody(reordered, arity, safe)) + + if len(uv) > 0 { + if uv.Equal(ovs) { // special case "closure-self" + continue + } + unsafe.Set(e, uv) + } + + for v := range unsafe[e] { + if ovs.Contains(v) || safe.Contains(v) { + delete(unsafe[e], v) + } + } + + if len(unsafe[e]) == 0 { + delete(unsafe, e) + reordered.Append(e) + safe.Update(ovs) // this expression's outputs are safe + } + } + + if len(reordered) == n { // fixed point, could not add any expr of body + break + } + } + + // Recursively visit closures and perform the safety checks on them. + // Update the globals at each expression to include the variables that could + // be closed over. + g := globals.Copy() + for i, e := range reordered { + if i > 0 { + g.Update(reordered[i-1].Vars(SafetyCheckVisitorParams)) + } + xform := &bodySafetyTransformer{ + builtins: builtins, + arity: arity, + current: e, + globals: g, + unsafe: unsafe, + } + NewGenericVisitor(xform.Visit).Walk(e) + } + + return reordered, unsafe +} + +type bodySafetyTransformer struct { + builtins map[string]*Builtin + arity func(Ref) int + current *Expr + globals VarSet + unsafe unsafeVars +} + +func (xform *bodySafetyTransformer) Visit(x interface{}) bool { + switch term := x.(type) { + case *Term: + switch x := term.Value.(type) { + case *object: + cpy, _ := x.Map(func(k, v *Term) (*Term, *Term, error) { + kcpy := k.Copy() + NewGenericVisitor(xform.Visit).Walk(kcpy) + vcpy := v.Copy() + NewGenericVisitor(xform.Visit).Walk(vcpy) + return kcpy, vcpy, nil + }) + term.Value = cpy + return true + case *set: + cpy, _ := x.Map(func(v *Term) (*Term, error) { + vcpy := v.Copy() + NewGenericVisitor(xform.Visit).Walk(vcpy) + return vcpy, nil + }) + term.Value = cpy + return true + case *ArrayComprehension: + xform.reorderArrayComprehensionSafety(x) + return true + case *ObjectComprehension: + xform.reorderObjectComprehensionSafety(x) + return true + case *SetComprehension: + xform.reorderSetComprehensionSafety(x) + return true + } + case *Expr: + if ev, ok := term.Terms.(*Every); ok { + xform.globals.Update(ev.KeyValueVars()) + ev.Body = xform.reorderComprehensionSafety(NewVarSet(), ev.Body) + return true + } + } + return false +} + +func (xform *bodySafetyTransformer) reorderComprehensionSafety(tv VarSet, body Body) Body { + bv := body.Vars(SafetyCheckVisitorParams) + bv.Update(xform.globals) + uv := tv.Diff(bv) + for v := range uv { + xform.unsafe.Add(xform.current, v) + } + + r, u := reorderBodyForSafety(xform.builtins, xform.arity, xform.globals, body) + if len(u) == 0 { + return r + } + + xform.unsafe.Update(u) + return body +} + +func (xform *bodySafetyTransformer) reorderArrayComprehensionSafety(ac *ArrayComprehension) { + ac.Body = xform.reorderComprehensionSafety(ac.Term.Vars(), ac.Body) +} + +func (xform *bodySafetyTransformer) reorderObjectComprehensionSafety(oc *ObjectComprehension) { + tv := oc.Key.Vars() + tv.Update(oc.Value.Vars()) + oc.Body = xform.reorderComprehensionSafety(tv, oc.Body) +} + +func (xform *bodySafetyTransformer) reorderSetComprehensionSafety(sc *SetComprehension) { + sc.Body = xform.reorderComprehensionSafety(sc.Term.Vars(), sc.Body) +} + +// unsafeVarsInClosures collects vars that are contained in closures within +// this expression. 
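+
+// Editor's sketch (illustration only, not part of the upstream change): a
+// minimal use of OutputVarsFromBody, defined below, which reports the vars an
+// ordered body would bind. The query string is hypothetical; NewCompiler,
+// MustParseBody, and NewVarSet come from this package.
+func exampleOutputVars() {
+	c := NewCompiler()
+	body := MustParseBody("x = 1; y = x")
+	fmt.Println(OutputVarsFromBody(c, body, NewVarSet())) // both x and y are bound
+}
+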
+func unsafeVarsInClosures(e *Expr) VarSet { + vs := VarSet{} + WalkClosures(e, func(x interface{}) bool { + vis := &VarVisitor{vars: vs} + if ev, ok := x.(*Every); ok { + vis.Walk(ev.Body) + return true + } + vis.Walk(x) + return true + }) + return vs +} + +// OutputVarsFromBody returns all variables which are the "output" for +// the given body. For safety checks this means that they would be +// made safe by the body. +func OutputVarsFromBody(c *Compiler, body Body, safe VarSet) VarSet { + return outputVarsForBody(body, c.GetArity, safe) +} + +func outputVarsForBody(body Body, arity func(Ref) int, safe VarSet) VarSet { + o := safe.Copy() + for _, e := range body { + o.Update(outputVarsForExpr(e, arity, o)) + } + return o.Diff(safe) +} + +// OutputVarsFromExpr returns all variables which are the "output" for +// the given expression. For safety checks this means that they would be +// made safe by the expr. +func OutputVarsFromExpr(c *Compiler, expr *Expr, safe VarSet) VarSet { + return outputVarsForExpr(expr, c.GetArity, safe) +} + +func outputVarsForExpr(expr *Expr, arity func(Ref) int, safe VarSet) VarSet { + + // Negated expressions must be safe. + if expr.Negated { + return VarSet{} + } + + // With modifier inputs must be safe. + for _, with := range expr.With { + vis := NewVarVisitor().WithParams(SafetyCheckVisitorParams) + vis.Walk(with) + vars := vis.Vars() + unsafe := vars.Diff(safe) + if len(unsafe) > 0 { + return VarSet{} + } + } + + switch terms := expr.Terms.(type) { + case *Term: + return outputVarsForTerms(expr, safe) + case []*Term: + if expr.IsEquality() { + return outputVarsForExprEq(expr, safe) + } + + operator, ok := terms[0].Value.(Ref) + if !ok { + return VarSet{} + } + + ar := arity(operator) + if ar < 0 { + return VarSet{} + } + + return outputVarsForExprCall(expr, ar, safe, terms) + case *Every: + return outputVarsForTerms(terms.Domain, safe) + default: + panic("illegal expression") + } +} + +func outputVarsForExprEq(expr *Expr, safe VarSet) VarSet { + + if !validEqAssignArgCount(expr) { + return safe + } + + output := outputVarsForTerms(expr, safe) + output.Update(safe) + output.Update(Unify(output, expr.Operand(0), expr.Operand(1))) + + return output.Diff(safe) +} + +func outputVarsForExprCall(expr *Expr, arity int, safe VarSet, terms []*Term) VarSet { + + output := outputVarsForTerms(expr, safe) + + numInputTerms := arity + 1 + if numInputTerms >= len(terms) { + return output + } + + params := VarVisitorParams{ + SkipClosures: true, + SkipSets: true, + SkipObjectKeys: true, + SkipRefHead: true, + } + vis := NewVarVisitor().WithParams(params) + vis.Walk(Args(terms[:numInputTerms])) + unsafe := vis.Vars().Diff(output).Diff(safe) + + if len(unsafe) > 0 { + return VarSet{} + } + + vis = NewVarVisitor().WithParams(params) + vis.Walk(Args(terms[numInputTerms:])) + output.Update(vis.vars) + return output +} + +func outputVarsForTerms(expr interface{}, safe VarSet) VarSet { + output := VarSet{} + WalkTerms(expr, func(x *Term) bool { + switch r := x.Value.(type) { + case *SetComprehension, *ArrayComprehension, *ObjectComprehension: + return true + case Ref: + if !isRefSafe(r, safe) { + return true + } + output.Update(r.OutputVars()) + return false + } + return false + }) + return output +} + +type equalityFactory struct { + gen *localVarGenerator +} + +func newEqualityFactory(gen *localVarGenerator) *equalityFactory { + return &equalityFactory{gen} +} + +func (f *equalityFactory) Generate(other *Term) *Expr { + term := 
NewTerm(f.gen.Generate()).SetLocation(other.Location) + expr := Equality.Expr(term, other) + expr.Generated = true + expr.Location = other.Location + return expr +} + +// TODO: Move to internal package? +const LocalVarPrefix = "__local" + +type localVarGenerator struct { + exclude VarSet + suffix string + next int +} + +func newLocalVarGeneratorForModuleSet(sorted []string, modules map[string]*Module) *localVarGenerator { + exclude := NewVarSet() + vis := &VarVisitor{vars: exclude} + for _, key := range sorted { + vis.Walk(modules[key]) + } + return &localVarGenerator{exclude: exclude, next: 0} +} + +func newLocalVarGenerator(suffix string, node interface{}) *localVarGenerator { + exclude := NewVarSet() + vis := &VarVisitor{vars: exclude} + vis.Walk(node) + return &localVarGenerator{exclude: exclude, suffix: suffix, next: 0} +} + +func (l *localVarGenerator) Generate() Var { + for { + result := Var(LocalVarPrefix + l.suffix + strconv.Itoa(l.next) + "__") + l.next++ + if !l.exclude.Contains(result) { + return result + } + } +} + +func getGlobals(pkg *Package, rules []Ref, imports []*Import) map[Var]*usedRef { + + globals := make(map[Var]*usedRef, len(rules)) // NB: might grow bigger with imports + + // Populate globals with exports within the package. + for _, ref := range rules { + v := ref[0].Value.(Var) + globals[v] = &usedRef{ref: pkg.Path.Append(StringTerm(string(v)))} + } + + // Populate globals with imports. + for _, imp := range imports { + path := imp.Path.Value.(Ref) + if FutureRootDocument.Equal(path[0]) || RegoRootDocument.Equal(path[0]) { + continue // ignore future and rego imports + } + globals[imp.Name()] = &usedRef{ref: path} + } + + return globals +} + +func requiresEval(x *Term) bool { + if x == nil { + return false + } + return ContainsRefs(x) || ContainsComprehensions(x) +} + +func resolveRef(globals map[Var]*usedRef, ignore *declaredVarStack, ref Ref) Ref { + + r := Ref{} + for i, x := range ref { + switch v := x.Value.(type) { + case Var: + if g, ok := globals[v]; ok && !ignore.Contains(v) { + cpy := g.ref.Copy() + for i := range cpy { + cpy[i].SetLocation(x.Location) + } + if i == 0 { + r = cpy + } else { + r = append(r, NewTerm(cpy).SetLocation(x.Location)) + } + g.used = true + } else { + r = append(r, x) + } + case Ref, *Array, Object, Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call: + r = append(r, resolveRefsInTerm(globals, ignore, x)) + default: + r = append(r, x) + } + } + + return r +} + +type usedRef struct { + ref Ref + used bool +} + +func resolveRefsInRule(globals map[Var]*usedRef, rule *Rule) error { + ignore := &declaredVarStack{} + + vars := NewVarSet() + var vis *GenericVisitor + var err error + + // Walk args to collect vars and transform body so that callers can shadow + // root documents. + vis = NewGenericVisitor(func(x interface{}) bool { + if err != nil { + return true + } + switch x := x.(type) { + case Var: + vars.Add(x) + + // Object keys cannot be pattern matched so only walk values. + case *object: + x.Foreach(func(_, v *Term) { + vis.Walk(v) + }) + + // Skip terms that could contain vars that cannot be pattern matched. + case Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call: + return true + + case *Term: + if _, ok := x.Value.(Ref); ok { + if RootDocumentRefs.Contains(x) { + // We could support args named input, data, etc. however + // this would require rewriting terms in the head and body. 
+ // Preventing root document shadowing is simpler, and + // arguably, will prevent confusing names from being used. + // NOTE: this check is also performed as part of strict-mode in + // checkRootDocumentOverrides. + err = fmt.Errorf("args must not shadow %v (use a different variable name)", x) + return true + } + } + } + return false + }) + + vis.Walk(rule.Head.Args) + + if err != nil { + return err + } + + ignore.Push(vars) + ignore.Push(declaredVars(rule.Body)) + + ref := rule.Head.Ref() + for i := 1; i < len(ref); i++ { + ref[i] = resolveRefsInTerm(globals, ignore, ref[i]) + } + if rule.Head.Key != nil { + rule.Head.Key = resolveRefsInTerm(globals, ignore, rule.Head.Key) + } + + if rule.Head.Value != nil { + rule.Head.Value = resolveRefsInTerm(globals, ignore, rule.Head.Value) + } + + rule.Body = resolveRefsInBody(globals, ignore, rule.Body) + return nil +} + +func resolveRefsInBody(globals map[Var]*usedRef, ignore *declaredVarStack, body Body) Body { + r := make([]*Expr, 0, len(body)) + for _, expr := range body { + r = append(r, resolveRefsInExpr(globals, ignore, expr)) + } + return r +} + +func resolveRefsInExpr(globals map[Var]*usedRef, ignore *declaredVarStack, expr *Expr) *Expr { + cpy := *expr + switch ts := expr.Terms.(type) { + case *Term: + cpy.Terms = resolveRefsInTerm(globals, ignore, ts) + case []*Term: + buf := make([]*Term, len(ts)) + for i := range ts { + buf[i] = resolveRefsInTerm(globals, ignore, ts[i]) + } + cpy.Terms = buf + case *SomeDecl: + if val, ok := ts.Symbols[0].Value.(Call); ok { + cpy.Terms = &SomeDecl{Symbols: []*Term{CallTerm(resolveRefsInTermSlice(globals, ignore, val)...)}} + } + case *Every: + locals := NewVarSet() + if ts.Key != nil { + locals.Update(ts.Key.Vars()) + } + locals.Update(ts.Value.Vars()) + ignore.Push(locals) + cpy.Terms = &Every{ + Key: ts.Key.Copy(), // TODO(sr): do more? + Value: ts.Value.Copy(), // TODO(sr): do more? + Domain: resolveRefsInTerm(globals, ignore, ts.Domain), + Body: resolveRefsInBody(globals, ignore, ts.Body), + } + ignore.Pop() + } + for _, w := range cpy.With { + w.Target = resolveRefsInTerm(globals, ignore, w.Target) + w.Value = resolveRefsInTerm(globals, ignore, w.Value) + } + return &cpy +} + +func resolveRefsInTerm(globals map[Var]*usedRef, ignore *declaredVarStack, term *Term) *Term { + switch v := term.Value.(type) { + case Var: + if g, ok := globals[v]; ok && !ignore.Contains(v) { + cpy := g.ref.Copy() + for i := range cpy { + cpy[i].SetLocation(term.Location) + } + g.used = true + return NewTerm(cpy).SetLocation(term.Location) + } + return term + case Ref: + fqn := resolveRef(globals, ignore, v) + cpy := *term + cpy.Value = fqn + return &cpy + case *object: + cpy := *term + cpy.Value, _ = v.Map(func(k, v *Term) (*Term, *Term, error) { + k = resolveRefsInTerm(globals, ignore, k) + v = resolveRefsInTerm(globals, ignore, v) + return k, v, nil + }) + return &cpy + case *Array: + cpy := *term + cpy.Value = NewArray(resolveRefsInTermArray(globals, ignore, v)...) 
+ return &cpy + case Call: + cpy := *term + cpy.Value = Call(resolveRefsInTermSlice(globals, ignore, v)) + return &cpy + case Set: + s, _ := v.Map(func(e *Term) (*Term, error) { + return resolveRefsInTerm(globals, ignore, e), nil + }) + cpy := *term + cpy.Value = s + return &cpy + case *ArrayComprehension: + ac := &ArrayComprehension{} + ignore.Push(declaredVars(v.Body)) + ac.Term = resolveRefsInTerm(globals, ignore, v.Term) + ac.Body = resolveRefsInBody(globals, ignore, v.Body) + cpy := *term + cpy.Value = ac + ignore.Pop() + return &cpy + case *ObjectComprehension: + oc := &ObjectComprehension{} + ignore.Push(declaredVars(v.Body)) + oc.Key = resolveRefsInTerm(globals, ignore, v.Key) + oc.Value = resolveRefsInTerm(globals, ignore, v.Value) + oc.Body = resolveRefsInBody(globals, ignore, v.Body) + cpy := *term + cpy.Value = oc + ignore.Pop() + return &cpy + case *SetComprehension: + sc := &SetComprehension{} + ignore.Push(declaredVars(v.Body)) + sc.Term = resolveRefsInTerm(globals, ignore, v.Term) + sc.Body = resolveRefsInBody(globals, ignore, v.Body) + cpy := *term + cpy.Value = sc + ignore.Pop() + return &cpy + default: + return term + } +} + +func resolveRefsInTermArray(globals map[Var]*usedRef, ignore *declaredVarStack, terms *Array) []*Term { + cpy := make([]*Term, terms.Len()) + for i := range terms.Len() { + cpy[i] = resolveRefsInTerm(globals, ignore, terms.Elem(i)) + } + return cpy +} + +func resolveRefsInTermSlice(globals map[Var]*usedRef, ignore *declaredVarStack, terms []*Term) []*Term { + cpy := make([]*Term, len(terms)) + for i := range terms { + cpy[i] = resolveRefsInTerm(globals, ignore, terms[i]) + } + return cpy +} + +type declaredVarStack []VarSet + +func (s declaredVarStack) Contains(v Var) bool { + for i := len(s) - 1; i >= 0; i-- { + if _, ok := s[i][v]; ok { + return ok + } + } + return false +} + +func (s declaredVarStack) Add(v Var) { + s[len(s)-1].Add(v) +} + +func (s *declaredVarStack) Push(vs VarSet) { + *s = append(*s, vs) +} + +func (s *declaredVarStack) Pop() { + curr := *s + *s = curr[:len(curr)-1] +} + +func declaredVars(x interface{}) VarSet { + vars := NewVarSet() + vis := NewGenericVisitor(func(x interface{}) bool { + switch x := x.(type) { + case *Expr: + if x.IsAssignment() && validEqAssignArgCount(x) { + WalkVars(x.Operand(0), func(v Var) bool { + vars.Add(v) + return false + }) + } else if decl, ok := x.Terms.(*SomeDecl); ok { + for i := range decl.Symbols { + switch val := decl.Symbols[i].Value.(type) { + case Var: + vars.Add(val) + case Call: + args := val[1:] + if len(args) == 3 { // some x, y in xs + WalkVars(args[1], func(v Var) bool { + vars.Add(v) + return false + }) + } + // some x in xs + WalkVars(args[0], func(v Var) bool { + vars.Add(v) + return false + }) + } + } + } + case *ArrayComprehension, *SetComprehension, *ObjectComprehension: + return true + } + return false + }) + vis.Walk(x) + return vars +} + +// rewriteComprehensionTerms will rewrite comprehensions so that the term part +// is bound to a variable in the body. This allows any type of term to be used +// in the term part (even if the term requires evaluation.) 
+//
+// For instance, given the following comprehension:
+//
+//	[x[0] | x = y[_]; y = [1,2,3]]
+//
+// The comprehension would be rewritten as:
+//
+//	[__local0__ | x = y[_]; y = [1,2,3]; __local0__ = x[0]]
+func rewriteComprehensionTerms(f *equalityFactory, node interface{}) (interface{}, error) {
+	return TransformComprehensions(node, func(x interface{}) (Value, error) {
+		switch x := x.(type) {
+		case *ArrayComprehension:
+			if requiresEval(x.Term) {
+				expr := f.Generate(x.Term)
+				x.Term = expr.Operand(0)
+				x.Body.Append(expr)
+			}
+			return x, nil
+		case *SetComprehension:
+			if requiresEval(x.Term) {
+				expr := f.Generate(x.Term)
+				x.Term = expr.Operand(0)
+				x.Body.Append(expr)
+			}
+			return x, nil
+		case *ObjectComprehension:
+			if requiresEval(x.Key) {
+				expr := f.Generate(x.Key)
+				x.Key = expr.Operand(0)
+				x.Body.Append(expr)
+			}
+			if requiresEval(x.Value) {
+				expr := f.Generate(x.Value)
+				x.Value = expr.Operand(0)
+				x.Body.Append(expr)
+			}
+			return x, nil
+		}
+		panic("illegal type")
+	})
+}
+
+// rewriteEquals will rewrite exprs under x as unification calls instead of ==
+// calls. For example:
+//
+//	data.foo == data.bar is rewritten as data.foo = data.bar
+//
+// This stage should only run after the safety check (since == is a built-in
+// with no outputs, so the inputs must not be marked as safe.)
+//
+// This stage is not executed by the query compiler by default because when
+// callers specify == instead of = they expect to receive a true/false/undefined
+// result back whereas with = the result is only ever true/undefined. For
+// partial evaluation cases we do want to rewrite == to = to simplify the
+// result.
+func rewriteEquals(x interface{}) (modified bool) {
+	doubleEq := Equal.Ref()
+	unifyOp := Equality.Ref()
+	t := NewGenericTransformer(func(x interface{}) (interface{}, error) {
+		if x, ok := x.(*Expr); ok && x.IsCall() {
+			operator := x.Operator()
+			if operator.Equal(doubleEq) && len(x.Operands()) == 2 {
+				modified = true
+				x.SetOperator(NewTerm(unifyOp))
+			}
+		}
+		return x, nil
+	})
+	_, _ = Transform(t, x) // ignore error
+	return modified
+}
+
+func rewriteTestEqualities(f *equalityFactory, body Body) Body {
+	result := make(Body, 0, len(body))
+	for _, expr := range body {
+		// We can't rewrite negated expressions; if the extracted term is undefined, evaluation would fail before
+		// reaching the negation check.
+		if !expr.Negated && !expr.Generated {
+			switch {
+			case expr.IsEquality():
+				terms := expr.Terms.([]*Term)
+				result, terms[1] = rewriteDynamicsShallow(expr, f, terms[1], result)
+				result, terms[2] = rewriteDynamicsShallow(expr, f, terms[2], result)
+			case expr.IsEvery():
+				// We rewrite equalities inside every-bodies because a failure there causes the test rule to fail.
+				// Failures inside other expressions with closures, such as comprehensions, won't cause the test rule to fail, so we skip those.
+ every := expr.Terms.(*Every) + every.Body = rewriteTestEqualities(f, every.Body) + } + } + result = appendExpr(result, expr) + } + return result +} + +func rewriteDynamicsShallow(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) { + switch term.Value.(type) { + case Ref, *ArrayComprehension, *SetComprehension, *ObjectComprehension: + generated := f.Generate(term) + generated.With = original.With + result.Append(generated) + connectGeneratedExprs(original, generated) + return result, result[len(result)-1].Operand(0) + } + return result, term +} + +// rewriteDynamics will rewrite the body so that dynamic terms (i.e., refs and +// comprehensions) are bound to vars earlier in the query. This translation +// results in eager evaluation. +// +// For instance, given the following query: +// +// foo(data.bar) = 1 +// +// The rewritten version will be: +// +// __local0__ = data.bar; foo(__local0__) = 1 +func rewriteDynamics(f *equalityFactory, body Body) Body { + result := make(Body, 0, len(body)) + for _, expr := range body { + switch { + case expr.IsEquality(): + result = rewriteDynamicsEqExpr(f, expr, result) + case expr.IsCall(): + result = rewriteDynamicsCallExpr(f, expr, result) + case expr.IsEvery(): + result = rewriteDynamicsEveryExpr(f, expr, result) + default: + result = rewriteDynamicsTermExpr(f, expr, result) + } + } + return result +} + +func appendExpr(body Body, expr *Expr) Body { + body.Append(expr) + return body +} + +func rewriteDynamicsEqExpr(f *equalityFactory, expr *Expr, result Body) Body { + if !validEqAssignArgCount(expr) { + return appendExpr(result, expr) + } + terms := expr.Terms.([]*Term) + result, terms[1] = rewriteDynamicsInTerm(expr, f, terms[1], result) + result, terms[2] = rewriteDynamicsInTerm(expr, f, terms[2], result) + return appendExpr(result, expr) +} + +func rewriteDynamicsCallExpr(f *equalityFactory, expr *Expr, result Body) Body { + terms := expr.Terms.([]*Term) + for i := 1; i < len(terms); i++ { + result, terms[i] = rewriteDynamicsOne(expr, f, terms[i], result) + } + return appendExpr(result, expr) +} + +func rewriteDynamicsEveryExpr(f *equalityFactory, expr *Expr, result Body) Body { + ev := expr.Terms.(*Every) + result, ev.Domain = rewriteDynamicsOne(expr, f, ev.Domain, result) + ev.Body = rewriteDynamics(f, ev.Body) + return appendExpr(result, expr) +} + +func rewriteDynamicsTermExpr(f *equalityFactory, expr *Expr, result Body) Body { + term := expr.Terms.(*Term) + result, expr.Terms = rewriteDynamicsInTerm(expr, f, term, result) + return appendExpr(result, expr) +} + +func rewriteDynamicsInTerm(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) { + switch v := term.Value.(type) { + case Ref: + for i := 1; i < len(v); i++ { + result, v[i] = rewriteDynamicsOne(original, f, v[i], result) + } + case *ArrayComprehension: + v.Body = rewriteDynamics(f, v.Body) + case *SetComprehension: + v.Body = rewriteDynamics(f, v.Body) + case *ObjectComprehension: + v.Body = rewriteDynamics(f, v.Body) + default: + result, term = rewriteDynamicsOne(original, f, term, result) + } + return result, term +} + +func rewriteDynamicsOne(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) { + switch v := term.Value.(type) { + case Ref: + for i := 1; i < len(v); i++ { + result, v[i] = rewriteDynamicsOne(original, f, v[i], result) + } + generated := f.Generate(term) + generated.With = original.With + result.Append(generated) + connectGeneratedExprs(original, generated) + return result, 
result[len(result)-1].Operand(0) + case *Array: + for i := range v.Len() { + var t *Term + result, t = rewriteDynamicsOne(original, f, v.Elem(i), result) + v.set(i, t) + } + return result, term + case *object: + cpy := NewObject() + v.Foreach(func(key, value *Term) { + result, key = rewriteDynamicsOne(original, f, key, result) + result, value = rewriteDynamicsOne(original, f, value, result) + cpy.Insert(key, value) + }) + return result, NewTerm(cpy).SetLocation(term.Location) + case Set: + cpy := NewSet() + for _, term := range v.Slice() { + var rw *Term + result, rw = rewriteDynamicsOne(original, f, term, result) + cpy.Add(rw) + } + return result, NewTerm(cpy).SetLocation(term.Location) + case *ArrayComprehension: + var extra *Expr + v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term) + result.Append(extra) + connectGeneratedExprs(original, extra) + return result, result[len(result)-1].Operand(0) + case *SetComprehension: + var extra *Expr + v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term) + result.Append(extra) + connectGeneratedExprs(original, extra) + return result, result[len(result)-1].Operand(0) + case *ObjectComprehension: + var extra *Expr + v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term) + result.Append(extra) + connectGeneratedExprs(original, extra) + return result, result[len(result)-1].Operand(0) + } + return result, term +} + +func rewriteDynamicsComprehensionBody(original *Expr, f *equalityFactory, body Body, term *Term) (Body, *Expr) { + body = rewriteDynamics(f, body) + generated := f.Generate(term) + generated.With = original.With + return body, generated +} + +func rewriteExprTermsInHead(gen *localVarGenerator, rule *Rule) { + for i := range rule.Head.Args { + support, output := expandExprTerm(gen, rule.Head.Args[i]) + for j := range support { + rule.Body.Append(support[j]) + } + rule.Head.Args[i] = output + } + if rule.Head.Key != nil { + support, output := expandExprTerm(gen, rule.Head.Key) + for i := range support { + rule.Body.Append(support[i]) + } + rule.Head.Key = output + } + if rule.Head.Value != nil { + support, output := expandExprTerm(gen, rule.Head.Value) + for i := range support { + rule.Body.Append(support[i]) + } + rule.Head.Value = output + } +} + +func rewriteExprTermsInBody(gen *localVarGenerator, body Body) Body { + cpy := make(Body, 0, len(body)) + for i := range body { + for _, expr := range expandExpr(gen, body[i]) { + cpy.Append(expr) + } + } + return cpy +} + +func expandExpr(gen *localVarGenerator, expr *Expr) (result []*Expr) { + for i := range expr.With { + extras, value := expandExprTerm(gen, expr.With[i].Value) + expr.With[i].Value = value + result = append(result, extras...) + } + switch terms := expr.Terms.(type) { + case *Term: + extras, term := expandExprTerm(gen, terms) + if len(expr.With) > 0 { + for i := range extras { + extras[i].With = expr.With + } + } + result = append(result, extras...) + expr.Terms = term + result = append(result, expr) + case []*Term: + for i := 1; i < len(terms); i++ { + var extras []*Expr + extras, terms[i] = expandExprTerm(gen, terms[i]) + connectGeneratedExprs(expr, extras...) + if len(expr.With) > 0 { + for i := range extras { + extras[i].With = expr.With + } + } + result = append(result, extras...) 
+ } + result = append(result, expr) + case *Every: + var extras []*Expr + + term := NewTerm(gen.Generate()).SetLocation(terms.Domain.Location) + eq := Equality.Expr(term, terms.Domain).SetLocation(terms.Domain.Location) + eq.Generated = true + eq.With = expr.With + extras = expandExpr(gen, eq) + terms.Domain = term + + terms.Body = rewriteExprTermsInBody(gen, terms.Body) + result = append(result, extras...) + result = append(result, expr) + } + return +} + +func connectGeneratedExprs(parent *Expr, children ...*Expr) { + for _, child := range children { + child.generatedFrom = parent + parent.generates = append(parent.generates, child) + } +} + +func expandExprTerm(gen *localVarGenerator, term *Term) (support []*Expr, output *Term) { + output = term + switch v := term.Value.(type) { + case Call: + for i := 1; i < len(v); i++ { + var extras []*Expr + extras, v[i] = expandExprTerm(gen, v[i]) + support = append(support, extras...) + } + output = NewTerm(gen.Generate()).SetLocation(term.Location) + expr := v.MakeExpr(output).SetLocation(term.Location) + expr.Generated = true + support = append(support, expr) + case Ref: + support = expandExprRef(gen, v) + case *Array: + support = expandExprTermArray(gen, v) + case *object: + cpy, _ := v.Map(func(k, v *Term) (*Term, *Term, error) { + extras1, expandedKey := expandExprTerm(gen, k) + extras2, expandedValue := expandExprTerm(gen, v) + support = append(support, extras1...) + support = append(support, extras2...) + return expandedKey, expandedValue, nil + }) + output = NewTerm(cpy).SetLocation(term.Location) + case Set: + cpy, _ := v.Map(func(x *Term) (*Term, error) { + extras, expanded := expandExprTerm(gen, x) + support = append(support, extras...) + return expanded, nil + }) + output = NewTerm(cpy).SetLocation(term.Location) + case *ArrayComprehension: + support, term := expandExprTerm(gen, v.Term) + for i := range support { + v.Body.Append(support[i]) + } + v.Term = term + v.Body = rewriteExprTermsInBody(gen, v.Body) + case *SetComprehension: + support, term := expandExprTerm(gen, v.Term) + for i := range support { + v.Body.Append(support[i]) + } + v.Term = term + v.Body = rewriteExprTermsInBody(gen, v.Body) + case *ObjectComprehension: + support, key := expandExprTerm(gen, v.Key) + for i := range support { + v.Body.Append(support[i]) + } + v.Key = key + support, value := expandExprTerm(gen, v.Value) + for i := range support { + v.Body.Append(support[i]) + } + v.Value = value + v.Body = rewriteExprTermsInBody(gen, v.Body) + } + return +} + +func expandExprRef(gen *localVarGenerator, v []*Term) (support []*Expr) { + // Start by calling a normal expandExprTerm on all terms. + support = expandExprTermSlice(gen, v) + + // Rewrite references in order to support indirect references. We rewrite + // e.g. + // + // [1, 2, 3][i] + // + // to + // + // __local_var = [1, 2, 3] + // __local_var[i] + // + // to support these. This only impacts the reference subject, i.e. the + // first item in the slice. + var subject = v[0] + switch subject.Value.(type) { + case *Array, Object, Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call: + f := newEqualityFactory(gen) + assignToLocal := f.Generate(subject) + support = append(support, assignToLocal) + v[0] = assignToLocal.Operand(0) + } + return +} + +func expandExprTermArray(gen *localVarGenerator, arr *Array) (support []*Expr) { + for i := range arr.Len() { + extras, v := expandExprTerm(gen, arr.Elem(i)) + arr.set(i, v) + support = append(support, extras...) 
+	}
+	return
+}
+
+func expandExprTermSlice(gen *localVarGenerator, v []*Term) (support []*Expr) {
+	for i := 0; i < len(v); i++ {
+		var extras []*Expr
+		extras, v[i] = expandExprTerm(gen, v[i])
+		support = append(support, extras...)
+	}
+	return
+}
+
+type localDeclaredVars struct {
+	vars []*declaredVarSet
+
+	// rewritten contains a mapping of *all* user-defined variables
+	// that have been rewritten whereas vars contains the state
+	// from the current query (not any nested queries, and all vars
+	// seen).
+	rewritten map[Var]Var
+
+	// indicates if an assignment (:= operator) has been seen *ever*
+	assignment bool
+}
+
+type varOccurrence int
+
+const (
+	newVar varOccurrence = iota
+	argVar
+	seenVar
+	assignedVar
+	declaredVar
+)
+
+type declaredVarSet struct {
+	vs         map[Var]Var
+	reverse    map[Var]Var
+	occurrence map[Var]varOccurrence
+	count      map[Var]int
+}
+
+func newDeclaredVarSet() *declaredVarSet {
+	return &declaredVarSet{
+		vs:         map[Var]Var{},
+		reverse:    map[Var]Var{},
+		occurrence: map[Var]varOccurrence{},
+		count:      map[Var]int{},
+	}
+}
+
+func newLocalDeclaredVars() *localDeclaredVars {
+	return &localDeclaredVars{
+		vars:      []*declaredVarSet{newDeclaredVarSet()},
+		rewritten: map[Var]Var{},
+	}
+}
+
+func (s *localDeclaredVars) Copy() *localDeclaredVars {
+	stack := &localDeclaredVars{
+		vars:      []*declaredVarSet{},
+		rewritten: map[Var]Var{},
+	}
+
+	for i := range s.vars {
+		stack.vars = append(stack.vars, newDeclaredVarSet())
+		maps.Copy(stack.vars[i].vs, s.vars[i].vs)
+		maps.Copy(stack.vars[i].reverse, s.vars[i].reverse)
+		maps.Copy(stack.vars[i].occurrence, s.vars[i].occurrence)
+		maps.Copy(stack.vars[i].count, s.vars[i].count)
+	}
+
+	maps.Copy(stack.rewritten, s.rewritten)
+
+	return stack
+}
+
+func (s *localDeclaredVars) Push() {
+	s.vars = append(s.vars, newDeclaredVarSet())
+}
+
+func (s *localDeclaredVars) Pop() *declaredVarSet {
+	sl := s.vars
+	curr := sl[len(sl)-1]
+	s.vars = sl[:len(sl)-1]
+	return curr
+}
+
+func (s localDeclaredVars) Peek() *declaredVarSet {
+	return s.vars[len(s.vars)-1]
+}
+
+func (s localDeclaredVars) Insert(x, y Var, occurrence varOccurrence) {
+	elem := s.vars[len(s.vars)-1]
+	elem.vs[x] = y
+	elem.reverse[y] = x
+	elem.occurrence[x] = occurrence
+
+	elem.count[x] = 1
+
+	// If the variable has been rewritten (where x != y, with y being
+	// the generated value), store it in the map of rewritten vars.
+	// Assume that the generated values are unique for the compilation.
+	if !x.Equal(y) {
+		s.rewritten[y] = x
+	}
+}
+
+func (s localDeclaredVars) Declared(x Var) (y Var, ok bool) {
+	for i := len(s.vars) - 1; i >= 0; i-- {
+		if y, ok = s.vars[i].vs[x]; ok {
+			return
+		}
+	}
+	return
+}
+
+// Occurrence returns a flag that indicates whether x has occurred in the
+// current scope.
+func (s localDeclaredVars) Occurrence(x Var) varOccurrence {
+	return s.vars[len(s.vars)-1].occurrence[x]
+}
+
+// GlobalOccurrence returns a flag that indicates whether x has occurred in the
+// global scope.
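+
+// Editor's sketch (illustration only, not part of the upstream change): after
+// compilation, vars assigned with := have been renamed to generated locals,
+// and the compiler's RewrittenVars map relates the generated names back to
+// the originals. The module source and printed output are hypothetical.
+func exampleRewrittenVars() {
+	c := NewCompiler()
+	c.Compile(map[string]*Module{
+		"m.rego": MustParseModule("package ex\n\np if { a := 1; a > 0 }\n"),
+	})
+	if !c.Failed() {
+		for generated, original := range c.RewrittenVars {
+			fmt.Println(generated, original) // e.g. __local0__ a
+		}
+	}
+}
+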
+func (s localDeclaredVars) GlobalOccurrence(x Var) (varOccurrence, bool) { + for i := len(s.vars) - 1; i >= 0; i-- { + if occ, ok := s.vars[i].occurrence[x]; ok { + return occ, true + } + } + return newVar, false +} + +// Seen marks x as seen by incrementing its counter +func (s localDeclaredVars) Seen(x Var) { + for i := len(s.vars) - 1; i >= 0; i-- { + dvs := s.vars[i] + if c, ok := dvs.count[x]; ok { + dvs.count[x] = c + 1 + return + } + } + + s.vars[len(s.vars)-1].count[x] = 1 +} + +// Count returns how many times x has been seen +func (s localDeclaredVars) Count(x Var) int { + for i := len(s.vars) - 1; i >= 0; i-- { + if c, ok := s.vars[i].count[x]; ok { + return c + } + } + + return 0 +} + +// rewriteLocalVars rewrites bodies to remove assignment/declaration +// expressions. For example: +// +// a := 1; p[a] +// +// Is rewritten to: +// +// __local0__ = 1; p[__local0__] +// +// During rewriting, assignees are validated to prevent use before declaration. +func rewriteLocalVars(g *localVarGenerator, stack *localDeclaredVars, used VarSet, body Body, strict bool) (Body, map[Var]Var, Errors) { + var errs Errors + body, errs = rewriteDeclaredVarsInBody(g, stack, used, body, errs, strict) + return body, stack.Peek().vs, errs +} + +func rewriteDeclaredVarsInBody(g *localVarGenerator, stack *localDeclaredVars, used VarSet, body Body, errs Errors, strict bool) (Body, Errors) { + + var cpy Body + + for i := range body { + var expr *Expr + switch { + case body[i].IsAssignment(): + stack.assignment = true + expr, errs = rewriteDeclaredAssignment(g, stack, body[i], errs, strict) + case body[i].IsSome(): + expr, errs = rewriteSomeDeclStatement(g, stack, body[i], errs, strict) + case body[i].IsEvery(): + expr, errs = rewriteEveryStatement(g, stack, body[i], errs, strict) + default: + expr, errs = rewriteDeclaredVarsInExpr(g, stack, body[i], errs, strict) + } + if expr != nil { + cpy.Append(expr) + } + } + + // If the body only contained a var statement it will be empty at this + // point. Append true to the body to ensure that it's non-empty (zero length + // bodies are not supported.) + if len(cpy) == 0 { + cpy.Append(NewExpr(BooleanTerm(true))) + } + + errs = checkUnusedAssignedVars(body, stack, used, errs, strict) + return cpy, checkUnusedDeclaredVars(body, stack, used, cpy, errs) +} + +func checkUnusedAssignedVars(body Body, stack *localDeclaredVars, used VarSet, errs Errors, strict bool) Errors { + + if !strict || len(errs) > 0 { + return errs + } + + dvs := stack.Peek() + unused := NewVarSet() + + for v, occ := range dvs.occurrence { + // A var that was assigned in this scope must have been seen (used) more than once (the time of assignment) in + // the same, or nested, scope to be counted as used. 
+ if !v.IsWildcard() && stack.Count(v) <= 1 && occ == assignedVar { + unused.Add(dvs.vs[v]) + } + } + + rewrittenUsed := NewVarSet() + for v := range used { + if gv, ok := stack.Declared(v); ok { + rewrittenUsed.Add(gv) + } else { + rewrittenUsed.Add(v) + } + } + + unused = unused.Diff(rewrittenUsed) + + for _, gv := range unused.Sorted() { + found := false + for i := range body { + if body[i].Vars(VarVisitorParams{}).Contains(gv) { + errs = append(errs, NewError(CompileErr, body[i].Loc(), "assigned var %v unused", dvs.reverse[gv])) + found = true + break + } + } + if !found { + errs = append(errs, NewError(CompileErr, body[0].Loc(), "assigned var %v unused", dvs.reverse[gv])) + } + } + + return errs +} + +func checkUnusedDeclaredVars(body Body, stack *localDeclaredVars, used VarSet, cpy Body, errs Errors) Errors { + + // NOTE(tsandall): Do not generate more errors if there are existing + // declaration errors. + if len(errs) > 0 { + return errs + } + + dvs := stack.Peek() + declared := NewVarSet() + + for v, occ := range dvs.occurrence { + if occ == declaredVar { + declared.Add(dvs.vs[v]) + } + } + + bodyvars := cpy.Vars(VarVisitorParams{}) + + for v := range used { + if gv, ok := stack.Declared(v); ok { + bodyvars.Add(gv) + } else { + bodyvars.Add(v) + } + } + + unused := declared.Diff(bodyvars).Diff(used) + + for _, gv := range unused.Sorted() { + rv := dvs.reverse[gv] + if !rv.IsGenerated() { + // Scan through body exprs, looking for a match between the + // bad var's original name, and each expr's declared vars. + foundUnusedVarByName := false + for i := range body { + varsDeclaredInExpr := declaredVars(body[i]) + if varsDeclaredInExpr.Contains(dvs.reverse[gv]) { + // TODO(philipc): Clean up the offset logic here when the parser + // reports more accurate locations. + errs = append(errs, NewError(CompileErr, body[i].Loc(), "declared var %v unused", dvs.reverse[gv])) + foundUnusedVarByName = true + break + } + } + // Default error location returned. 
+ if !foundUnusedVarByName { + errs = append(errs, NewError(CompileErr, body[0].Loc(), "declared var %v unused", dvs.reverse[gv])) + } + } + } + + return errs +} + +func rewriteEveryStatement(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) { + e := expr.Copy() + every := e.Terms.(*Every) + + errs = rewriteDeclaredVarsInTermRecursive(g, stack, every.Domain, errs, strict) + + stack.Push() + defer stack.Pop() + + // if the key exists, rewrite + if every.Key != nil { + if v := every.Key.Value.(Var); !v.IsWildcard() { + gv, err := rewriteDeclaredVar(g, stack, v, declaredVar) + if err != nil { + return nil, append(errs, NewError(CompileErr, every.Loc(), err.Error())) //nolint:govet + } + every.Key.Value = gv + } + } else { // if the key doesn't exist, add dummy local + every.Key = NewTerm(g.Generate()) + } + + // value is always present + if v := every.Value.Value.(Var); !v.IsWildcard() { + gv, err := rewriteDeclaredVar(g, stack, v, declaredVar) + if err != nil { + return nil, append(errs, NewError(CompileErr, every.Loc(), err.Error())) //nolint:govet + } + every.Value.Value = gv + } + + used := NewVarSet() + every.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, every.Body, errs, strict) + + return rewriteDeclaredVarsInExpr(g, stack, e, errs, strict) +} + +func rewriteSomeDeclStatement(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) { + e := expr.Copy() + decl := e.Terms.(*SomeDecl) + for i := range decl.Symbols { + switch v := decl.Symbols[i].Value.(type) { + case Var: + if _, err := rewriteDeclaredVar(g, stack, v, declaredVar); err != nil { + return nil, append(errs, NewError(CompileErr, decl.Loc(), err.Error())) //nolint:govet + } + case Call: + var key, val, container *Term + switch len(v) { + case 4: // member3 + key = v[1] + val = v[2] + container = v[3] + case 3: // member + key = NewTerm(g.Generate()) + val = v[1] + container = v[2] + } + + var rhs *Term + switch c := container.Value.(type) { + case Ref: + rhs = RefTerm(append(c, key)...) + default: + rhs = RefTerm(container, key) + } + e.Terms = []*Term{ + RefTerm(VarTerm(Equality.Name)), val, rhs, + } + + for _, v0 := range outputVarsForExprEq(e, container.Vars()).Sorted() { + if _, err := rewriteDeclaredVar(g, stack, v0, declaredVar); err != nil { + return nil, append(errs, NewError(CompileErr, decl.Loc(), err.Error())) //nolint:govet + } + } + return rewriteDeclaredVarsInExpr(g, stack, e, errs, strict) + } + } + return nil, errs +} + +func rewriteDeclaredVarsInExpr(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) { + vis := NewGenericVisitor(func(x interface{}) bool { + var stop bool + switch x := x.(type) { + case *Term: + stop, errs = rewriteDeclaredVarsInTerm(g, stack, x, errs, strict) + case *With: + stop, errs = true, rewriteDeclaredVarsInWithRecursive(g, stack, x, errs, strict) + } + return stop + }) + vis.Walk(expr) + return expr, errs +} + +func rewriteDeclaredAssignment(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) { + + if expr.Negated { + errs = append(errs, NewError(CompileErr, expr.Location, "cannot assign vars inside negated expression")) + return expr, errs + } + + numErrsBefore := len(errs) + + if !validEqAssignArgCount(expr) { + return expr, errs + } + + // Rewrite terms on right hand side capture seen vars and recursively + // process comprehensions before left hand side is processed. 
Also + // rewrite with modifier. + errs = rewriteDeclaredVarsInTermRecursive(g, stack, expr.Operand(1), errs, strict) + + for _, w := range expr.With { + errs = rewriteDeclaredVarsInTermRecursive(g, stack, w.Value, errs, strict) + } + + // Rewrite vars on left hand side with unique names. Catch redeclaration + // and invalid term types here. + var vis func(t *Term) bool + + vis = func(t *Term) bool { + switch v := t.Value.(type) { + case Var: + if gv, err := rewriteDeclaredVar(g, stack, v, assignedVar); err != nil { + errs = append(errs, NewError(CompileErr, t.Location, err.Error())) //nolint:govet + } else { + t.Value = gv + } + return true + case *Array: + return false + case *object: + v.Foreach(func(_, v *Term) { + WalkTerms(v, vis) + }) + return true + case Ref: + if RootDocumentRefs.Contains(t) { + if gv, err := rewriteDeclaredVar(g, stack, v[0].Value.(Var), assignedVar); err != nil { + errs = append(errs, NewError(CompileErr, t.Location, err.Error())) //nolint:govet + } else { + t.Value = gv + } + return true + } + } + errs = append(errs, NewError(CompileErr, t.Location, "cannot assign to %v", ValueName(t.Value))) + return true + } + + WalkTerms(expr.Operand(0), vis) + + if len(errs) == numErrsBefore { + loc := expr.Operator()[0].Location + expr.SetOperator(RefTerm(VarTerm(Equality.Name).SetLocation(loc)).SetLocation(loc)) + } + + return expr, errs +} + +func rewriteDeclaredVarsInTerm(g *localVarGenerator, stack *localDeclaredVars, term *Term, errs Errors, strict bool) (bool, Errors) { + switch v := term.Value.(type) { + case Var: + if gv, ok := stack.Declared(v); ok { + term.Value = gv + stack.Seen(v) + } else if stack.Occurrence(v) == newVar { + stack.Insert(v, v, seenVar) + } + case Ref: + if RootDocumentRefs.Contains(term) { + x := v[0].Value.(Var) + if occ, ok := stack.GlobalOccurrence(x); ok && occ != seenVar { + gv, _ := stack.Declared(x) + term.Value = gv + } + + return true, errs + } + return false, errs + case Call: + ref := v[0] + WalkVars(ref, func(v Var) bool { + if gv, ok := stack.Declared(v); ok && !gv.Equal(v) { + // We will rewrite the ref of a function call, which is never ok since we don't have first-class functions. 
+ errs = append(errs, NewError(CompileErr, term.Location, "called function %s shadowed", ref)) + return true + } + return false + }) + return false, errs + case *object: + cpy, _ := v.Map(func(k, v *Term) (*Term, *Term, error) { + kcpy := k.Copy() + errs = rewriteDeclaredVarsInTermRecursive(g, stack, kcpy, errs, strict) + errs = rewriteDeclaredVarsInTermRecursive(g, stack, v, errs, strict) + return kcpy, v, nil + }) + term.Value = cpy + case Set: + cpy, _ := v.Map(func(elem *Term) (*Term, error) { + elemcpy := elem.Copy() + errs = rewriteDeclaredVarsInTermRecursive(g, stack, elemcpy, errs, strict) + return elemcpy, nil + }) + term.Value = cpy + case *ArrayComprehension: + errs = rewriteDeclaredVarsInArrayComprehension(g, stack, v, errs, strict) + case *SetComprehension: + errs = rewriteDeclaredVarsInSetComprehension(g, stack, v, errs, strict) + case *ObjectComprehension: + errs = rewriteDeclaredVarsInObjectComprehension(g, stack, v, errs, strict) + default: + return false, errs + } + return true, errs +} + +func rewriteDeclaredVarsInTermRecursive(g *localVarGenerator, stack *localDeclaredVars, term *Term, errs Errors, strict bool) Errors { + WalkTerms(term, func(t *Term) bool { + var stop bool + stop, errs = rewriteDeclaredVarsInTerm(g, stack, t, errs, strict) + return stop + }) + return errs +} + +func rewriteDeclaredVarsInWithRecursive(g *localVarGenerator, stack *localDeclaredVars, w *With, errs Errors, strict bool) Errors { + // NOTE(sr): `with input as` and `with input.a.b.c as` are deliberately skipped here: `input` could + // have been shadowed by a local variable/argument but should NOT be replaced in the `with` target. + // + // We cannot drop `input` from the stack since it's conceivable to do `with input[input] as` where + // the second input is meant to be the local var. It's a terrible idea, but when you're shadowing + // `input` those might be your thing. + errs = rewriteDeclaredVarsInTermRecursive(g, stack, w.Target, errs, strict) + if sdwInput, ok := stack.Declared(InputRootDocument.Value.(Var)); ok { // Was "input" shadowed... + switch value := w.Target.Value.(type) { + case Var: + if sdwInput.Equal(value) { // ...and replaced? 
If so, fix it + w.Target.Value = InputRootRef + } + case Ref: + if sdwInput.Equal(value[0].Value.(Var)) { + w.Target.Value.(Ref)[0].Value = InputRootDocument.Value + } + } + } + // No special handling of the `with` value + return rewriteDeclaredVarsInTermRecursive(g, stack, w.Value, errs, strict) +} + +func rewriteDeclaredVarsInArrayComprehension(g *localVarGenerator, stack *localDeclaredVars, v *ArrayComprehension, errs Errors, strict bool) Errors { + used := NewVarSet() + used.Update(v.Term.Vars()) + + stack.Push() + v.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, v.Body, errs, strict) + errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Term, errs, strict) + stack.Pop() + return errs +} + +func rewriteDeclaredVarsInSetComprehension(g *localVarGenerator, stack *localDeclaredVars, v *SetComprehension, errs Errors, strict bool) Errors { + used := NewVarSet() + used.Update(v.Term.Vars()) + + stack.Push() + v.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, v.Body, errs, strict) + errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Term, errs, strict) + stack.Pop() + return errs +} + +func rewriteDeclaredVarsInObjectComprehension(g *localVarGenerator, stack *localDeclaredVars, v *ObjectComprehension, errs Errors, strict bool) Errors { + used := NewVarSet() + used.Update(v.Key.Vars()) + used.Update(v.Value.Vars()) + + stack.Push() + v.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, v.Body, errs, strict) + errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Key, errs, strict) + errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Value, errs, strict) + stack.Pop() + return errs +} + +func rewriteDeclaredVar(g *localVarGenerator, stack *localDeclaredVars, v Var, occ varOccurrence) (gv Var, err error) { + switch stack.Occurrence(v) { + case seenVar: + return gv, fmt.Errorf("var %v referenced above", v) + case assignedVar: + return gv, fmt.Errorf("var %v assigned above", v) + case declaredVar: + return gv, fmt.Errorf("var %v declared above", v) + case argVar: + return gv, fmt.Errorf("arg %v redeclared", v) + } + gv = g.Generate() + stack.Insert(v, gv, occ) + return +} + +// rewriteWithModifiersInBody will rewrite the body so that with modifiers do +// not contain terms that require evaluation as values. If this function +// encounters an invalid with modifier target then it will raise an error. 
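+
+// Editor's sketch (illustration only, not part of the upstream change): a
+// with value that requires evaluation, like the count(...) call here, is
+// pulled out into a generated assignment by the rewriting below. The module
+// source and printed body are hypothetical.
+func exampleWithRewrite() {
+	c := NewCompiler()
+	c.Compile(map[string]*Module{
+		"m.rego": MustParseModule("package ex\n\nq if input.n > 1\n\np if data.ex.q with input.n as count([1, 2])\n"),
+	})
+	if !c.Failed() {
+		fmt.Println(c.Modules["m.rego"].Rules[1].Body) // count([1, 2]) is now bound to a local var first
+	}
+}
+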
+func rewriteWithModifiersInBody(c *Compiler, unsafeBuiltinsMap map[string]struct{}, f *equalityFactory, body Body) (Body, *Error) {
+	var result Body
+	for i := range body {
+		exprs, err := rewriteWithModifier(c, unsafeBuiltinsMap, f, body[i])
+		if err != nil {
+			return nil, err
+		}
+		if len(exprs) > 0 {
+			for _, expr := range exprs {
+				result.Append(expr)
+			}
+		} else {
+			result.Append(body[i])
+		}
+	}
+	return result, nil
+}
+
+func rewriteWithModifier(c *Compiler, unsafeBuiltinsMap map[string]struct{}, f *equalityFactory, expr *Expr) ([]*Expr, *Error) {
+
+	var result []*Expr
+	for i := range expr.With {
+		eval, err := validateWith(c, unsafeBuiltinsMap, expr, i)
+		if err != nil {
+			return nil, err
+		}
+
+		if eval {
+			eq := f.Generate(expr.With[i].Value)
+			result = append(result, eq)
+			expr.With[i].Value = eq.Operand(0)
+		}
+	}
+
+	return append(result, expr), nil
+}
+
+func validateWith(c *Compiler, unsafeBuiltinsMap map[string]struct{}, expr *Expr, i int) (bool, *Error) {
+	target, value := expr.With[i].Target, expr.With[i].Value
+
+	// Ensure that values that are built-ins are rewritten to Ref (not Var)
+	if v, ok := value.Value.(Var); ok {
+		if _, ok := c.builtins[v.String()]; ok {
+			value.Value = Ref([]*Term{NewTerm(v)})
+		}
+	}
+	isBuiltinRefOrVar, err := isBuiltinRefOrVar(c.builtins, unsafeBuiltinsMap, target)
+	if err != nil {
+		return false, err
+	}
+
+	isAllowedUnknownFuncCall := false
+	if c.allowUndefinedFuncCalls {
+		switch target.Value.(type) {
+		case Ref, Var:
+			isAllowedUnknownFuncCall = true
+		}
+	}
+
+	switch {
+	case isDataRef(target):
+		ref := target.Value.(Ref)
+		targetNode := c.RuleTree
+		for i := range len(ref) - 1 {
+			child := targetNode.Child(ref[i].Value)
+			if child == nil {
+				break
+			} else if len(child.Values) > 0 {
+				return false, NewError(CompileErr, target.Loc(), "with keyword cannot partially replace virtual document(s)")
+			}
+			targetNode = child
+		}
+
+		if targetNode != nil {
+			// NOTE(sr): at this point in the compiler stages, we don't have a fully-populated
+			// TypeEnv yet -- so we have to make do with this check to see if the replacement
+			// target is a function. It's probably wrong for arity-0 functions, but those are
+			// an edge case anyway.
+			if child := targetNode.Child(ref[len(ref)-1].Value); child != nil {
+				for _, v := range child.Values {
+					if len(v.(*Rule).Head.Args) > 0 {
+						if ok, err := validateWithFunctionValue(c.builtins, unsafeBuiltinsMap, c.RuleTree, value); err != nil || ok {
+							return false, err // err may be nil
+						}
+					}
+				}
+			}
+		}
+
+		// If the with-value is a ref to a function, but not a call, we can't rewrite it
+		if r, ok := value.Value.(Ref); ok {
+			// TODO: check that target ref doesn't exist?
+ if valueNode := c.RuleTree.Find(r); valueNode != nil { + for _, v := range valueNode.Values { + if len(v.(*Rule).Head.Args) > 0 { + return false, nil + } + } + } + } + case isInputRef(target): // ok, valid + case isBuiltinRefOrVar: + + // NOTE(sr): first we ensure that parsed Var builtins (`count`, `concat`, etc) + // are rewritten to their proper Ref convention + if v, ok := target.Value.(Var); ok { + target.Value = Ref([]*Term{NewTerm(v)}) + } + + targetRef := target.Value.(Ref) + bi := c.builtins[targetRef.String()] // safe because isBuiltinRefOrVar checked this + if err := validateWithBuiltinTarget(bi, targetRef, target.Loc()); err != nil { + return false, err + } + + if ok, err := validateWithFunctionValue(c.builtins, unsafeBuiltinsMap, c.RuleTree, value); err != nil || ok { + return false, err // err may be nil + } + case isAllowedUnknownFuncCall: + // The target isn't a ref to the input doc, data doc, or a known built-in, but it might be a ref to an unknown built-in. + return false, nil + default: + return false, NewError(TypeErr, target.Location, "with keyword target must reference existing %v, %v, or a function", InputRootDocument, DefaultRootDocument) + } + return requiresEval(value), nil +} + +func validateWithBuiltinTarget(bi *Builtin, target Ref, loc *location.Location) *Error { + switch bi.Name { + case Equality.Name, + RegoMetadataChain.Name, + RegoMetadataRule.Name: + return NewError(CompileErr, loc, "with keyword replacing built-in function: replacement of %q invalid", bi.Name) + } + + switch { + case target.HasPrefix(Ref([]*Term{VarTerm("internal")})): + return NewError(CompileErr, loc, "with keyword replacing built-in function: replacement of internal function %q invalid", target) + + case bi.Relation: + return NewError(CompileErr, loc, "with keyword replacing built-in function: target must not be a relation") + + case bi.Decl.Result() == nil: + return NewError(CompileErr, loc, "with keyword replacing built-in function: target must not be a void function") + } + return nil +} + +func validateWithFunctionValue(bs map[string]*Builtin, unsafeMap map[string]struct{}, ruleTree *TreeNode, value *Term) (bool, *Error) { + if v, ok := value.Value.(Ref); ok { + if ruleTree.Find(v) != nil { // ref exists in rule tree + return true, nil + } + } + return isBuiltinRefOrVar(bs, unsafeMap, value) +} + +func isInputRef(term *Term) bool { + if ref, ok := term.Value.(Ref); ok { + if ref.HasPrefix(InputRootRef) { + return true + } + } + return false +} + +func isDataRef(term *Term) bool { + if ref, ok := term.Value.(Ref); ok { + if ref.HasPrefix(DefaultRootRef) { + return true + } + } + return false +} + +func isBuiltinRefOrVar(bs map[string]*Builtin, unsafeBuiltinsMap map[string]struct{}, term *Term) (bool, *Error) { + switch v := term.Value.(type) { + case Ref, Var: + if _, ok := unsafeBuiltinsMap[v.String()]; ok { + return false, NewError(CompileErr, term.Location, "with keyword replacing built-in function: target must not be unsafe: %q", v) + } + _, ok := bs[v.String()] + return ok, nil + } + return false, nil +} + +func isVirtual(node *TreeNode, ref Ref) bool { + for i := range ref { + child := node.Child(ref[i].Value) + if child == nil { + return false + } else if len(child.Values) > 0 { + return true + } + node = child + } + return true +} + +func safetyErrorSlice(unsafe unsafeVars, rewritten map[Var]Var) (result Errors) { + + if len(unsafe) == 0 { + return + } + + for _, pair := range unsafe.Vars() { + v := pair.Var + if w, ok := rewritten[v]; ok { + v = w + } + if 
!v.IsGenerated() { + if _, ok := allFutureKeywords[string(v)]; ok { + result = append(result, NewError(UnsafeVarErr, pair.Loc, + "var %[1]v is unsafe (hint: `import future.keywords.%[1]v` to import a future keyword)", v)) + continue + } + result = append(result, NewError(UnsafeVarErr, pair.Loc, "var %v is unsafe", v)) + } + } + + if len(result) > 0 { + return + } + + // If the expression contains unsafe generated variables, report which + // expressions are unsafe instead of the variables that are unsafe (since + // the latter are not meaningful to the user.) + pairs := unsafe.Slice() + + slices.SortFunc(pairs, func(a, b unsafePair) int { + return a.Expr.Location.Compare(b.Expr.Location) + }) + + // Report at most one error per generated variable. + seen := NewVarSet() + + for _, expr := range pairs { + before := len(seen) + for v := range expr.Vars { + if v.IsGenerated() { + seen.Add(v) + } + } + if len(seen) > before { + result = append(result, NewError(UnsafeVarErr, expr.Expr.Location, "expression is unsafe")) + } + } + + return +} + +func checkUnsafeBuiltins(unsafeBuiltinsMap map[string]struct{}, node interface{}) Errors { + errs := make(Errors, 0) + WalkExprs(node, func(x *Expr) bool { + if x.IsCall() { + operator := x.Operator().String() + if _, ok := unsafeBuiltinsMap[operator]; ok { + errs = append(errs, NewError(TypeErr, x.Loc(), "unsafe built-in function calls in expression: %v", operator)) + } + } + return false + }) + return errs +} + +func rewriteVarsInRef(vars ...map[Var]Var) varRewriter { + return func(node Ref) Ref { + i, _ := TransformVars(node, func(v Var) (Value, error) { + for _, m := range vars { + if u, ok := m[v]; ok { + return u, nil + } + } + return v, nil + }) + return i.(Ref) + } +} + +// NOTE(sr): This is duplicated with compile/compile.go; but moving it into another location +// would cause a circular dependency -- the refSet definition needs ast.Ref. If we make it +// public in the ast package, the compile package could take it from there, but it would also +// increase our public interface. Let's reconsider if we need it in a third place. +type refSet struct { + s []Ref +} + +func newRefSet(x ...Ref) *refSet { + result := &refSet{} + for i := range x { + result.AddPrefix(x[i]) + } + return result +} + +// ContainsPrefix returns true if r is prefixed by any of the existing refs in the set. +func (rs *refSet) ContainsPrefix(r Ref) bool { + return slices.ContainsFunc(rs.s, r.HasPrefix) +} + +// AddPrefix inserts r into the set if r is not prefixed by any existing +// refs in the set. If any existing refs are prefixed by r, those existing +// refs are removed. +func (rs *refSet) AddPrefix(r Ref) { + if rs.ContainsPrefix(r) { + return + } + cpy := []Ref{r} + for i := range rs.s { + if !rs.s[i].HasPrefix(r) { + cpy = append(cpy, rs.s[i]) + } + } + rs.s = cpy +} + +// Sorted returns a sorted slice of terms for refs in the set. +func (rs *refSet) Sorted() []*Term { + terms := make([]*Term, len(rs.s)) + for i := range rs.s { + terms[i] = NewTerm(rs.s[i]) + } + slices.SortFunc(terms, TermValueCompare) + return terms +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/compilehelper.go b/vendor/github.com/open-policy-agent/opa/v1/ast/compilehelper.go new file mode 100644 index 00000000000..7d81d45e6d2 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/compilehelper.go @@ -0,0 +1,62 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package ast + +// CompileModules takes a set of Rego modules represented as strings and +// compiles them for evaluation. The keys of the map are used as filenames. +func CompileModules(modules map[string]string) (*Compiler, error) { + return CompileModulesWithOpt(modules, CompileOpts{}) +} + +// CompileOpts defines a set of options for the compiler. +type CompileOpts struct { + EnablePrintStatements bool + ParserOptions ParserOptions +} + +// CompileModulesWithOpt takes a set of Rego modules represented as strings and +// compiles them for evaluation. The keys of the map are used as filenames. +func CompileModulesWithOpt(modules map[string]string, opts CompileOpts) (*Compiler, error) { + + parsed := make(map[string]*Module, len(modules)) + + for f, module := range modules { + var pm *Module + var err error + if pm, err = ParseModuleWithOpts(f, module, opts.ParserOptions); err != nil { + return nil, err + } + parsed[f] = pm + } + + compiler := NewCompiler(). + WithDefaultRegoVersion(opts.ParserOptions.RegoVersion). + WithEnablePrintStatements(opts.EnablePrintStatements) + compiler.Compile(parsed) + + if compiler.Failed() { + return nil, compiler.Errors + } + + return compiler, nil +} + +// MustCompileModules compiles a set of Rego modules represented as strings. If +// the compilation process fails, this function panics. +func MustCompileModules(modules map[string]string) *Compiler { + return MustCompileModulesWithOpts(modules, CompileOpts{}) +} + +// MustCompileModulesWithOpts compiles a set of Rego modules represented as strings. If +// the compilation process fails, this function panics. +func MustCompileModulesWithOpts(modules map[string]string, opts CompileOpts) *Compiler { + + compiler, err := CompileModulesWithOpt(modules, opts) + if err != nil { + panic(err) + } + + return compiler +} diff --git a/vendor/github.com/open-policy-agent/opa/ast/compilemetrics.go b/vendor/github.com/open-policy-agent/opa/v1/ast/compilemetrics.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/ast/compilemetrics.go rename to vendor/github.com/open-policy-agent/opa/v1/ast/compilemetrics.go diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/conflicts.go b/vendor/github.com/open-policy-agent/opa/v1/ast/conflicts.go new file mode 100644 index 00000000000..685cc6b6943 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/conflicts.go @@ -0,0 +1,79 @@ +// Copyright 2019 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "slices" + "strings" +) + +// CheckPathConflicts returns a set of errors indicating paths that +// are in conflict with the result of the provided callable. +func CheckPathConflicts(c *Compiler, exists func([]string) (bool, error)) Errors { + var errs Errors + + root := c.RuleTree.Child(DefaultRootDocument.Value) + if root == nil { + return nil + } + + if len(c.pathConflictCheckRoots) == 0 || slices.Contains(c.pathConflictCheckRoots, "") { + for _, child := range root.Children { + errs = append(errs, checkDocumentConflicts(child, exists, nil)...) + } + return errs + } + + for _, rootPath := range c.pathConflictCheckRoots { + // traverse AST from `path` to go to the new root + paths := strings.Split(rootPath, "/") + node := root + for _, key := range paths { + node = node.Child(String(key)) + if node == nil { + break + } + } + + if node == nil { + // could not find the node from the AST (e.g. 
`path` is from a data file) + // then no conflict is possible + continue + } + + for _, child := range node.Children { + errs = append(errs, checkDocumentConflicts(child, exists, paths)...) + } + } + + return errs +} + +func checkDocumentConflicts(node *TreeNode, exists func([]string) (bool, error), path []string) Errors { + + switch key := node.Key.(type) { + case String: + path = append(path, string(key)) + default: // other key types cannot conflict with data + return nil + } + + if len(node.Values) > 0 { + s := strings.Join(path, "/") + if ok, err := exists(path); err != nil { + return Errors{NewError(CompileErr, node.Values[0].(*Rule).Loc(), "conflict check for data path %v: %v", s, err.Error())} + } else if ok { + return Errors{NewError(CompileErr, node.Values[0].(*Rule).Loc(), "conflicting rule for data path %v found", s)} + } + } + + var errs Errors + + for _, child := range node.Children { + errs = append(errs, checkDocumentConflicts(child, exists, path)...) + } + + return errs +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/doc.go b/vendor/github.com/open-policy-agent/opa/v1/ast/doc.go new file mode 100644 index 00000000000..62b04e301ee --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/doc.go @@ -0,0 +1,36 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package ast declares Rego syntax tree types and also includes a parser and compiler for preparing policies for execution in the policy engine. +// +// Rego policies are defined using a relatively small set of types: modules, package and import declarations, rules, expressions, and terms. At their core, policies consist of rules that are defined by one or more expressions over documents available to the policy engine. The expressions are defined by intrinsic values (terms) such as strings, objects, variables, etc. +// +// Rego policies are typically defined in text files and then parsed and compiled by the policy engine at runtime. The parsing stage takes the text or string representation of the policy and converts it into an abstract syntax tree (AST) that consists of the types mentioned above. The AST is organized as follows: +// +// Module +// | +// +--- Package (Reference) +// | +// +--- Imports +// | | +// | +--- Import (Term) +// | +// +--- Rules +// | +// +--- Rule +// | +// +--- Head +// | | +// | +--- Name (Variable) +// | | +// | +--- Key (Term) +// | | +// | +--- Value (Term) +// | +// +--- Body +// | +// +--- Expression (Term | Terms | Variable Declaration) +// +// At query time, the policy engine expects policies to have been compiled. The compilation stage takes one or more modules and compiles them into a format that the policy engine supports. +package ast diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/env.go b/vendor/github.com/open-policy-agent/opa/v1/ast/env.go new file mode 100644 index 00000000000..9bffd03e0aa --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/env.go @@ -0,0 +1,528 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "fmt" + "strings" + + "github.com/open-policy-agent/opa/v1/types" + "github.com/open-policy-agent/opa/v1/util" +) + +// TypeEnv contains type info for static analysis such as type checking. 
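+//
+// A hedged sketch of typical behavior (hypothetical types): if the checker
+// recorded `input.user` as types.S, then env.GetByValue(MustParseRef("input.user"))
+// returns types.S; a lookup that misses this environment is retried against
+// the wrapped `next` environment.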
+type TypeEnv struct { + tree *typeTreeNode + next *TypeEnv + newChecker func() *typeChecker +} + +// newTypeEnv returns an empty TypeEnv. The constructor is not exported because +// type environments should only be created by the type checker. +func newTypeEnv(f func() *typeChecker) *TypeEnv { + return &TypeEnv{ + tree: newTypeTree(), + newChecker: f, + } +} + +// Get returns the type of x. +// Deprecated: Use GetByValue or GetByRef instead, as they are more efficient. +func (env *TypeEnv) Get(x interface{}) types.Type { + if term, ok := x.(*Term); ok { + x = term.Value + } + + if v, ok := x.(Value); ok { + return env.GetByValue(v) + } + + panic("unreachable") +} + +// GetByValue returns the type of v. +func (env *TypeEnv) GetByValue(v Value) types.Type { + switch x := v.(type) { + + // Scalars. + case Null: + return types.Nl + case Boolean: + return types.B + case Number: + return types.N + case String: + return types.S + + // Composites. + case *Array: + static := make([]types.Type, x.Len()) + for i := range static { + tpe := env.GetByValue(x.Elem(i).Value) + static[i] = tpe + } + + var dynamic types.Type + if len(static) == 0 { + dynamic = types.A + } + + return types.NewArray(static, dynamic) + + case *lazyObj: + return env.GetByValue(x.force()) + case *object: + static := []*types.StaticProperty{} + var dynamic *types.DynamicProperty + + x.Foreach(func(k, v *Term) { + if IsConstant(k.Value) { + kjson, err := JSON(k.Value) + if err == nil { + tpe := env.GetByValue(v.Value) + static = append(static, types.NewStaticProperty(kjson, tpe)) + return + } + } + // Can't handle it as a static property, fallback to dynamic + typeK := env.GetByValue(k.Value) + typeV := env.GetByValue(v.Value) + dynamic = types.NewDynamicProperty(typeK, typeV) + }) + + if len(static) == 0 && dynamic == nil { + dynamic = types.NewDynamicProperty(types.A, types.A) + } + + return types.NewObject(static, dynamic) + + case Set: + var tpe types.Type + x.Foreach(func(elem *Term) { + tpe = types.Or(tpe, env.GetByValue(elem.Value)) + }) + if tpe == nil { + tpe = types.A + } + return types.NewSet(tpe) + + // Comprehensions. + case *ArrayComprehension: + cpy, errs := env.newChecker().CheckBody(env, x.Body) + if len(errs) == 0 { + return types.NewArray(nil, cpy.GetByValue(x.Term.Value)) + } + return nil + case *ObjectComprehension: + cpy, errs := env.newChecker().CheckBody(env, x.Body) + if len(errs) == 0 { + return types.NewObject(nil, types.NewDynamicProperty(cpy.GetByValue(x.Key.Value), cpy.GetByValue(x.Value.Value))) + } + return nil + case *SetComprehension: + cpy, errs := env.newChecker().CheckBody(env, x.Body) + if len(errs) == 0 { + return types.NewSet(cpy.GetByValue(x.Term.Value)) + } + return nil + + // Refs. + case Ref: + return env.GetByRef(x) + + // Vars. + case Var: + if node := env.tree.Child(v); node != nil { + return node.Value() + } + if env.next != nil { + return env.next.GetByValue(v) + } + return nil + + // Calls. + case Call: + return nil + } + + return env.Get(v) +} + +// GetByRef returns the type of the value referred to by ref. 
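+//
+// For example (assumed setup): with no type recorded under `input`, GetByRef
+// on `input.user` ends in getRefFallback, which returns types.A because
+// `input` is a root document name; with recorded types it walks the type
+// tree via getRefRec below.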
+func (env *TypeEnv) GetByRef(ref Ref) types.Type { + node := env.tree.Child(ref[0].Value) + if node == nil { + return env.getRefFallback(ref) + } + + return env.getRefRec(node, ref, ref[1:]) +} + +func (env *TypeEnv) getRefFallback(ref Ref) types.Type { + + if env.next != nil { + return env.next.GetByRef(ref) + } + + if RootDocumentNames.Contains(ref[0]) { + return types.A + } + + return nil +} + +func (env *TypeEnv) getRefRec(node *typeTreeNode, ref, tail Ref) types.Type { + if len(tail) == 0 { + return env.getRefRecExtent(node) + } + + if node.Leaf() { + if node.children.Len() > 0 { + if child := node.Child(tail[0].Value); child != nil { + return env.getRefRec(child, ref, tail[1:]) + } + } + return selectRef(node.Value(), tail) + } + + if !IsConstant(tail[0].Value) { + return selectRef(env.getRefRecExtent(node), tail) + } + + child := node.Child(tail[0].Value) + if child == nil { + return env.getRefFallback(ref) + } + + return env.getRefRec(child, ref, tail[1:]) +} + +func (env *TypeEnv) getRefRecExtent(node *typeTreeNode) types.Type { + + if node.Leaf() { + return node.Value() + } + + children := []*types.StaticProperty{} + + node.Children().Iter(func(key Value, child *typeTreeNode) bool { + tpe := env.getRefRecExtent(child) + + // NOTE(sr): Converting to Golang-native types here is an extension of what we did + // before -- only supporting strings. But since we cannot differentiate sets and arrays + // that way, we could reconsider. + switch key.(type) { + case String, Number, Boolean: // skip anything else + propKey, err := JSON(key) + if err != nil { + panic(fmt.Errorf("unreachable, ValueToInterface: %w", err)) + } + children = append(children, types.NewStaticProperty(propKey, tpe)) + } + return false + }) + + // TODO(tsandall): for now, these objects can have any dynamic properties + // because we don't have schema for base docs. Once schemas are supported + // we can improve this. + return types.NewObject(children, types.NewDynamicProperty(types.S, types.A)) +} + +func (env *TypeEnv) wrap() *TypeEnv { + cpy := *env + cpy.next = env + cpy.tree = newTypeTree() + return &cpy +} + +// typeTreeNode is used to store type information in a tree. 
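+//
+// Illustrative only: Put(MustParseRef("data.a.b"), types.S) creates the chain
+// data -> a -> b with types.S stored at the leaf, which Get on the same path
+// then returns.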
+type typeTreeNode struct {
+	key      Value
+	value    types.Type
+	children *util.HasherMap[Value, *typeTreeNode]
+}
+
+func newTypeTree() *typeTreeNode {
+	return &typeTreeNode{
+		key:      nil,
+		value:    nil,
+		children: util.NewHasherMap[Value, *typeTreeNode](ValueEqual),
+	}
+}
+
+func (n *typeTreeNode) Child(key Value) *typeTreeNode {
+	value, ok := n.children.Get(key)
+	if !ok {
+		return nil
+	}
+	return value
+}
+
+func (n *typeTreeNode) Children() *util.HasherMap[Value, *typeTreeNode] {
+	return n.children
+}
+
+func (n *typeTreeNode) Get(path Ref) types.Type {
+	curr := n
+	for _, term := range path {
+		child, ok := curr.children.Get(term.Value)
+		if !ok {
+			return nil
+		}
+		curr = child
+	}
+	return curr.Value()
+}
+
+func (n *typeTreeNode) Leaf() bool {
+	return n.value != nil
+}
+
+func (n *typeTreeNode) PutOne(key Value, tpe types.Type) {
+	c, ok := n.children.Get(key)
+
+	var child *typeTreeNode
+	if !ok {
+		child = newTypeTree()
+		child.key = key
+		n.children.Put(key, child)
+	} else {
+		child = c
+	}
+
+	child.value = tpe
+}
+
+func (n *typeTreeNode) Put(path Ref, tpe types.Type) {
+	curr := n
+	for _, term := range path {
+		c, ok := curr.children.Get(term.Value)
+
+		var child *typeTreeNode
+		if !ok {
+			child = newTypeTree()
+			child.key = term.Value
+			curr.children.Put(child.key, child)
+		} else {
+			child = c
+		}
+
+		curr = child
+	}
+	curr.value = tpe
+}
+
+// Insert inserts tpe at path in the tree, but also merges the value into any types.Object present along that path.
+// If a types.Object is inserted, any leafs already present further down the tree are merged into the inserted object.
+// path must be ground.
+func (n *typeTreeNode) Insert(path Ref, tpe types.Type, env *TypeEnv) {
+	curr := n
+	for i, term := range path {
+		c, ok := curr.children.Get(term.Value)
+
+		var child *typeTreeNode
+		if !ok {
+			child = newTypeTree()
+			child.key = term.Value
+			curr.children.Put(child.key, child)
+		} else {
+			child = c
+			if child.value != nil && i+1 < len(path) {
+				// If child has an object value, merge the new value into it.
+				if o, ok := child.value.(*types.Object); ok {
+					var err error
+					child.value, err = insertIntoObject(o, path[i+1:], tpe, env)
+					if err != nil {
+						panic(fmt.Errorf("unreachable, insertIntoObject: %w", err))
+					}
+				}
+			}
+		}
+
+		curr = child
+	}
+
+	curr.value = mergeTypes(curr.value, tpe)
+
+	if _, ok := tpe.(*types.Object); ok && curr.children.Len() > 0 {
+		// merge all leafs into the inserted object
+		leafs := curr.Leafs()
+		for p, t := range leafs {
+			var err error
+			curr.value, err = insertIntoObject(curr.value.(*types.Object), *p, t, env)
+			if err != nil {
+				panic(fmt.Errorf("unreachable, insertIntoObject: %w", err))
+			}
+		}
+	}
+}
+
+// mergeTypes merges the types of 'a' and 'b'. If both are sets, their 'of' types are joined with a types.Or.
+// If both are objects, the key types of their dynamic properties are joined with a types.Or, and their value
+// types are recursively merged (using mergeTypes).
+// If 'a' and 'b' are both objects, and at least one of them has static properties, they are joined
+// with a types.Or instead of being merged.
+// If 'a' is an Any containing an Object, and 'b' is an Object (or vice versa), and both objects have no
+// static properties, they are merged.
+// If 'a' and 'b' are different types, they are joined with a types.Or.
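+//
+// For example (illustrative): mergeTypes(types.NewSet(types.S), types.NewSet(types.N))
+// yields types.NewSet(types.Or(types.S, types.N)), i.e. a set of string-or-number.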
+func mergeTypes(a, b types.Type) types.Type { + if a == nil { + return b + } + + if b == nil { + return a + } + + switch a := a.(type) { + case *types.Object: + if bObj, ok := b.(*types.Object); ok && len(a.StaticProperties()) == 0 && len(bObj.StaticProperties()) == 0 { + if len(a.StaticProperties()) > 0 || len(bObj.StaticProperties()) > 0 { + return types.Or(a, bObj) + } + + aDynProps := a.DynamicProperties() + bDynProps := bObj.DynamicProperties() + dynProps := types.NewDynamicProperty( + types.Or(aDynProps.Key, bDynProps.Key), + mergeTypes(aDynProps.Value, bDynProps.Value)) + return types.NewObject(nil, dynProps) + } else if bAny, ok := b.(types.Any); ok && len(a.StaticProperties()) == 0 { + // If a is an object type with no static components ... + for _, t := range bAny { + if tObj, ok := t.(*types.Object); ok && len(tObj.StaticProperties()) == 0 { + // ... and b is a types.Any containing an object with no static components, we merge them. + aDynProps := a.DynamicProperties() + tDynProps := tObj.DynamicProperties() + tDynProps.Key = types.Or(tDynProps.Key, aDynProps.Key) + tDynProps.Value = types.Or(tDynProps.Value, aDynProps.Value) + return bAny + } + } + } + case *types.Set: + if bSet, ok := b.(*types.Set); ok { + return types.NewSet(types.Or(a.Of(), bSet.Of())) + } + case types.Any: + if _, ok := b.(types.Any); !ok { + return mergeTypes(b, a) + } + } + + return types.Or(a, b) +} + +func (n *typeTreeNode) String() string { + b := strings.Builder{} + + if k := n.key; k != nil { + b.WriteString(k.String()) + } else { + b.WriteString("-") + } + + if v := n.value; v != nil { + b.WriteString(": ") + b.WriteString(v.String()) + } + + n.children.Iter(func(_ Value, child *typeTreeNode) bool { + b.WriteString("\n\t+ ") + s := child.String() + s = strings.ReplaceAll(s, "\n", "\n\t") + b.WriteString(s) + + return false + }) + + return b.String() +} + +func insertIntoObject(o *types.Object, path Ref, tpe types.Type, env *TypeEnv) (*types.Object, error) { + if len(path) == 0 { + return o, nil + } + + key := env.GetByValue(path[0].Value) + + if len(path) == 1 { + var dynamicProps *types.DynamicProperty + if dp := o.DynamicProperties(); dp != nil { + dynamicProps = types.NewDynamicProperty(types.Or(o.DynamicProperties().Key, key), types.Or(o.DynamicProperties().Value, tpe)) + } else { + dynamicProps = types.NewDynamicProperty(key, tpe) + } + return types.NewObject(o.StaticProperties(), dynamicProps), nil + } + + child, err := insertIntoObject(types.NewObject(nil, nil), path[1:], tpe, env) + if err != nil { + return nil, err + } + + var dynamicProps *types.DynamicProperty + if dp := o.DynamicProperties(); dp != nil { + dynamicProps = types.NewDynamicProperty(types.Or(o.DynamicProperties().Key, key), types.Or(o.DynamicProperties().Value, child)) + } else { + dynamicProps = types.NewDynamicProperty(key, child) + } + return types.NewObject(o.StaticProperties(), dynamicProps), nil +} + +func (n *typeTreeNode) Leafs() map[*Ref]types.Type { + leafs := map[*Ref]types.Type{} + n.children.Iter(func(_ Value, v *typeTreeNode) bool { + collectLeafs(v, nil, leafs) + return false + }) + return leafs +} + +func collectLeafs(n *typeTreeNode, path Ref, leafs map[*Ref]types.Type) { + nPath := append(path, NewTerm(n.key)) + if n.Leaf() { + leafs[&nPath] = n.Value() + return + } + n.children.Iter(func(_ Value, v *typeTreeNode) bool { + collectLeafs(v, nPath, leafs) + return false + }) +} + +func (n *typeTreeNode) Value() types.Type { + return n.value +} + +// selectConstant returns the attribute of the type referred 
to by the term. If +// the attribute type cannot be determined, nil is returned. +func selectConstant(tpe types.Type, term *Term) types.Type { + x, err := JSON(term.Value) + if err == nil { + return types.Select(tpe, x) + } + return nil +} + +// selectRef returns the type of the nested attribute referred to by ref. If +// the attribute type cannot be determined, nil is returned. If the ref +// contains vars or refs, then the returned type will be a union of the +// possible types. +func selectRef(tpe types.Type, ref Ref) types.Type { + + if tpe == nil || len(ref) == 0 { + return tpe + } + + head, tail := ref[0], ref[1:] + + switch head.Value.(type) { + case Var, Ref, *Array, Object, Set: + return selectRef(types.Values(tpe), tail) + default: + return selectRef(selectConstant(tpe, head), tail) + } +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/errors.go b/vendor/github.com/open-policy-agent/opa/v1/ast/errors.go new file mode 100644 index 00000000000..c7aab711416 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/errors.go @@ -0,0 +1,124 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "fmt" + "slices" + "strconv" + "strings" +) + +// Errors represents a series of errors encountered during parsing, compiling, +// etc. +type Errors []*Error + +func (e Errors) Error() string { + + if len(e) == 0 { + return "no error(s)" + } + + if len(e) == 1 { + return fmt.Sprintf("1 error occurred: %v", e[0].Error()) + } + + s := make([]string, len(e)) + for i, err := range e { + s[i] = err.Error() + } + + return fmt.Sprintf("%d errors occurred:\n%s", len(e), strings.Join(s, "\n")) +} + +// Sort sorts the error slice by location. If the locations are equal then the +// error message is compared. +func (e Errors) Sort() { + slices.SortFunc(e, func(a, b *Error) int { + if cmp := a.Location.Compare(b.Location); cmp != 0 { + return cmp + } + + return strings.Compare(a.Error(), b.Error()) + }) +} + +const ( + // ParseErr indicates an unclassified parse error occurred. + ParseErr = "rego_parse_error" + + // CompileErr indicates an unclassified compile error occurred. + CompileErr = "rego_compile_error" + + // TypeErr indicates a type error was caught. + TypeErr = "rego_type_error" + + // UnsafeVarErr indicates an unsafe variable was found during compilation. + UnsafeVarErr = "rego_unsafe_var_error" + + // RecursionErr indicates recursion was found during compilation. + RecursionErr = "rego_recursion_error" + + // FormatErr indicates an error occurred during formatting. + FormatErr = "rego_format_error" +) + +// IsError returns true if err is an AST error with code. +func IsError(code string, err error) bool { + if err, ok := err.(*Error); ok { + return err.Code == code + } + return false +} + +// ErrorDetails defines the interface for detailed error messages. +type ErrorDetails interface { + Lines() []string +} + +// Error represents a single error caught during parsing, compiling, etc. 
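+//
+// For example (hypothetical values), an Error with Code CompileErr, Message
+// "boom" and a Location in policy.rego at row 3 renders via Error() below as:
+//
+//	policy.rego:3: rego_compile_error: boom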
+type Error struct { + Code string `json:"code"` + Message string `json:"message"` + Location *Location `json:"location,omitempty"` + Details ErrorDetails `json:"details,omitempty"` +} + +func (e *Error) Error() string { + + var prefix string + + if e.Location != nil { + + if len(e.Location.File) > 0 { + prefix += e.Location.File + ":" + strconv.Itoa(e.Location.Row) + } else { + prefix += strconv.Itoa(e.Location.Row) + ":" + strconv.Itoa(e.Location.Col) + } + } + + msg := fmt.Sprintf("%v: %v", e.Code, e.Message) + + if len(prefix) > 0 { + msg = prefix + ": " + msg + } + + if e.Details != nil { + for _, line := range e.Details.Lines() { + msg += "\n\t" + line + } + } + + return msg +} + +// NewError returns a new Error object. +func NewError(code string, loc *Location, f string, a ...interface{}) *Error { + return &Error{ + Code: code, + Location: loc, + Message: fmt.Sprintf(f, a...), + } +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/index.go b/vendor/github.com/open-policy-agent/opa/v1/ast/index.go new file mode 100644 index 00000000000..722b70e57e9 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/index.go @@ -0,0 +1,968 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "fmt" + "slices" + "sort" + "strings" + "sync" + + "github.com/open-policy-agent/opa/v1/util" +) + +// RuleIndex defines the interface for rule indices. +type RuleIndex interface { + + // Build tries to construct an index for the given rules. If the index was + // constructed, it returns true, otherwise false. + Build(rules []*Rule) bool + + // Lookup searches the index for rules that will match the provided + // resolver. If the resolver returns an error, it is returned via err. + Lookup(resolver ValueResolver) (*IndexResult, error) + + // AllRules traverses the index and returns all rules that will match + // the provided resolver without any optimizations (effectively with + // indexing disabled). If the resolver returns an error, it is returned + // via err. + AllRules(resolver ValueResolver) (*IndexResult, error) +} + +// IndexResult contains the result of an index lookup. +type IndexResult struct { + Rules []*Rule + Else map[*Rule][]*Rule + Default *Rule + Kind RuleKind + EarlyExit bool + OnlyGroundRefs bool +} + +// NewIndexResult returns a new IndexResult object. +func NewIndexResult(kind RuleKind) *IndexResult { + return &IndexResult{ + Kind: kind, + } +} + +// Empty returns true if there are no rules to evaluate. +func (ir *IndexResult) Empty() bool { + return len(ir.Rules) == 0 && ir.Default == nil +} + +type baseDocEqIndex struct { + isVirtual func(Ref) bool + root *trieNode + defaultRule *Rule + kind RuleKind + onlyGroundRefs bool +} + +var ( + equalityRef = Equality.Ref() + equalRef = Equal.Ref() + globMatchRef = GlobMatch.Ref() + internalPrintRef = InternalPrint.Ref() + internalTestCaseRef = InternalTestCase.Ref() + + skipIndexing = NewSet(NewTerm(internalPrintRef), NewTerm(internalTestCaseRef)) +) + +func newBaseDocEqIndex(isVirtual func(Ref) bool) *baseDocEqIndex { + return &baseDocEqIndex{ + isVirtual: isVirtual, + root: newTrieNodeImpl(), + onlyGroundRefs: true, + } +} + +func (i *baseDocEqIndex) Build(rules []*Rule) bool { + if len(rules) == 0 { + return false + } + + i.kind = rules[0].Head.RuleKind() + indices := newrefindices(i.isVirtual) + + // build indices for each rule. 
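+	// (Illustrative, hypothetical rule) e.g. for `p { input.x = 1 }`, the walk
+	// below records the candidate ref `input.x` with value 1 via indices.Update.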
+ for idx := range rules { + WalkRules(rules[idx], func(rule *Rule) bool { + if rule.Default { + i.defaultRule = rule + return false + } + if i.onlyGroundRefs { + i.onlyGroundRefs = rule.Head.Reference.IsGround() + } + var skip bool + for i := range rule.Body { + if op := rule.Body[i].OperatorTerm(); op != nil && skipIndexing.Contains(op) { + skip = true + break + } + } + if !skip { + for i := range rule.Body { + indices.Update(rule, rule.Body[i]) + } + } + return false + }) + } + + // build trie out of indices. + for idx := range rules { + var prio int + WalkRules(rules[idx], func(rule *Rule) bool { + if rule.Default { + return false + } + node := i.root + if indices.Indexed(rule) { + for _, ref := range indices.Sorted() { + node = node.Insert(ref, indices.Value(rule, ref), indices.Mapper(rule, ref)) + } + } + // Insert rule into trie with (insertion order, priority order) + // tuple. Retaining the insertion order allows us to return rules + // in the order they were passed to this function. + node.append([...]int{idx, prio}, rule) + prio++ + return false + }) + } + return true +} + +func (i *baseDocEqIndex) Lookup(resolver ValueResolver) (*IndexResult, error) { + tr := ttrPool.Get().(*trieTraversalResult) + + defer func() { + clear(tr.unordered) + tr.ordering = tr.ordering[:0] + tr.multiple = false + tr.exist = nil + + ttrPool.Put(tr) + }() + + err := i.root.Traverse(resolver, tr) + if err != nil { + return nil, err + } + + result := IndexResultPool.Get() + + result.Kind = i.kind + result.Default = i.defaultRule + result.OnlyGroundRefs = i.onlyGroundRefs + + if result.Rules == nil { + result.Rules = make([]*Rule, 0, len(tr.ordering)) + } else { + result.Rules = result.Rules[:0] + } + + clear(result.Else) + + for _, pos := range tr.ordering { + slices.SortFunc(tr.unordered[pos], func(a, b *ruleNode) int { + return a.prio[1] - b.prio[1] + }) + nodes := tr.unordered[pos] + root := nodes[0].rule + + result.Rules = append(result.Rules, root) + if len(nodes) > 1 { + if result.Else == nil { + result.Else = map[*Rule][]*Rule{} + } + + result.Else[root] = make([]*Rule, len(nodes)-1) + for i := 1; i < len(nodes); i++ { + result.Else[root][i-1] = nodes[i].rule + } + } + } + + if !tr.multiple { + // even when the indexer hasn't seen multiple values, the rule itself could be one + // where early exit shouldn't be applied. 
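+		// (Illustrative) two complete definitions `p := 1 { input.x = 1 }` and
+		// `p := 2 { input.y = 2 }` can both match a query, and early exit must
+		// stay off because their head values differ; the comparison below
+		// detects exactly that.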
+ var lastValue Value + for i := range result.Rules { + if result.Rules[i].Head.DocKind() != CompleteDoc { + tr.multiple = true + break + } + if result.Rules[i].Head.Value != nil { + if lastValue != nil && !ValueEqual(lastValue, result.Rules[i].Head.Value.Value) { + tr.multiple = true + break + } + lastValue = result.Rules[i].Head.Value.Value + } + } + } + + result.EarlyExit = !tr.multiple + + return result, nil +} + +func (i *baseDocEqIndex) AllRules(_ ValueResolver) (*IndexResult, error) { + tr := newTrieTraversalResult() + + // Walk over the rule trie and accumulate _all_ rules + rw := &ruleWalker{result: tr} + i.root.Do(rw) + + result := NewIndexResult(i.kind) + result.Default = i.defaultRule + result.OnlyGroundRefs = i.onlyGroundRefs + result.Rules = make([]*Rule, 0, len(tr.ordering)) + + for _, pos := range tr.ordering { + slices.SortFunc(tr.unordered[pos], func(a, b *ruleNode) int { + return a.prio[1] - b.prio[1] + }) + nodes := tr.unordered[pos] + root := nodes[0].rule + result.Rules = append(result.Rules, root) + if len(nodes) > 1 { + if result.Else == nil { + result.Else = map[*Rule][]*Rule{} + } + + result.Else[root] = make([]*Rule, len(nodes)-1) + for i := 1; i < len(nodes); i++ { + result.Else[root][i-1] = nodes[i].rule + } + } + } + + result.EarlyExit = !tr.multiple + + return result, nil +} + +type ruleWalker struct { + result *trieTraversalResult +} + +func (r *ruleWalker) Do(x interface{}) trieWalker { + tn := x.(*trieNode) + r.result.Add(tn) + return r +} + +type valueMapper struct { + Key string + MapValue func(Value) Value +} + +type refindex struct { + Ref Ref + Value Value + Mapper *valueMapper +} + +type refindices struct { + isVirtual func(Ref) bool + rules map[*Rule][]*refindex + frequency *util.HasherMap[Ref, int] + sorted []Ref +} + +func newrefindices(isVirtual func(Ref) bool) *refindices { + return &refindices{ + isVirtual: isVirtual, + rules: map[*Rule][]*refindex{}, + frequency: util.NewHasherMap[Ref, int](RefEqual), + } +} + +// Update attempts to update the refindices for the given expression in the +// given rule. If the expression cannot be indexed the update does not affect +// the indices. +func (i *refindices) Update(rule *Rule, expr *Expr) { + + if expr.Negated { + return + } + + if len(expr.With) > 0 { + // NOTE(tsandall): In the future, we may need to consider expressions + // that have with statements applied to them. + return + } + + op := expr.Operator() + + switch { + case op.Equal(equalityRef): + i.updateEq(rule, expr) + + case op.Equal(equalRef) && len(expr.Operands()) == 2: + // NOTE(tsandall): if equal() is called with more than two arguments the + // output value is being captured in which case the indexer cannot + // exclude the rule if the equal() call would return false (because the + // false value must still be produced.) + i.updateEq(rule, expr) + + case op.Equal(globMatchRef) && len(expr.Operands()) == 3: + // NOTE(sr): Same as with equal() above -- 4 operands means the output + // of `glob.match` is captured and the rule can thus not be excluded. + i.updateGlobMatch(rule, expr) + } +} + +// Sorted returns a sorted list of references that the indices were built from. +// References that appear more frequently in the indexed rules are ordered +// before less frequently appearing references. 
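+//
+// For example (hypothetical counts): if `input.kind` was indexed by ten rules
+// and `input.name` by two, Sorted returns [input.kind, input.name]; ties are
+// broken by source location so the order stays deterministic.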
+func (i *refindices) Sorted() []Ref { + + if i.sorted == nil { + counts := make([]int, 0, i.frequency.Len()) + i.sorted = make([]Ref, 0, i.frequency.Len()) + + i.frequency.Iter(func(k Ref, v int) bool { + counts = append(counts, v) + i.sorted = append(i.sorted, k) + return false + }) + + sort.Slice(i.sorted, func(a, b int) bool { + if counts[a] > counts[b] { + return true + } else if counts[b] > counts[a] { + return false + } + return i.sorted[a][0].Loc().Compare(i.sorted[b][0].Loc()) < 0 + }) + } + + return i.sorted +} + +func (i *refindices) Indexed(rule *Rule) bool { + return len(i.rules[rule]) > 0 +} + +func (i *refindices) Value(rule *Rule, ref Ref) Value { + if index := i.index(rule, ref); index != nil { + return index.Value + } + return nil +} + +func (i *refindices) Mapper(rule *Rule, ref Ref) *valueMapper { + if index := i.index(rule, ref); index != nil { + return index.Mapper + } + return nil +} + +func (i *refindices) updateEq(rule *Rule, expr *Expr) { + a, b := expr.Operand(0), expr.Operand(1) + args := rule.Head.Args + if idx, ok := eqOperandsToRefAndValue(i.isVirtual, args, a, b); ok { + i.insert(rule, idx) + return + } + if idx, ok := eqOperandsToRefAndValue(i.isVirtual, args, b, a); ok { + i.insert(rule, idx) + return + } +} + +func (i *refindices) updateGlobMatch(rule *Rule, expr *Expr) { + args := rule.Head.Args + + delim, ok := globDelimiterToString(expr.Operand(1)) + if !ok { + return + } + + if arr := globPatternToArray(expr.Operand(0), delim); arr != nil { + // The 3rd operand of glob.match is the value to match. We assume the + // 3rd operand was a reference that has been rewritten and bound to a + // variable earlier in the query OR a function argument variable. + match := expr.Operand(2) + if _, ok := match.Value.(Var); ok { + var ref Ref + for _, other := range i.rules[rule] { + if _, ok := other.Value.(Var); ok && other.Value.Compare(match.Value) == 0 { + ref = other.Ref + } + } + if ref == nil { + for j, arg := range args { + if arg.Equal(match) { + ref = Ref{FunctionArgRootDocument, InternedIntNumberTerm(j)} + } + } + } + if ref != nil { + i.insert(rule, &refindex{ + Ref: ref, + Value: arr.Value, + Mapper: &valueMapper{ + Key: delim, + MapValue: func(v Value) Value { + if s, ok := v.(String); ok { + return stringSliceToArray(splitStringEscaped(string(s), delim)) + } + return v + }, + }, + }) + } + } + } +} + +func (i *refindices) insert(rule *Rule, index *refindex) { + + count, ok := i.frequency.Get(index.Ref) + if !ok { + count = 0 + } + + i.frequency.Put(index.Ref, count+1) + + for pos, other := range i.rules[rule] { + if other.Ref.Equal(index.Ref) { + i.rules[rule][pos] = index + return + } + } + + i.rules[rule] = append(i.rules[rule], index) +} + +func (i *refindices) index(rule *Rule, ref Ref) *refindex { + for _, index := range i.rules[rule] { + if index.Ref.Equal(ref) { + return index + } + } + return nil +} + +type trieWalker interface { + Do(x interface{}) trieWalker +} + +type trieTraversalResult struct { + unordered map[int][]*ruleNode + ordering []int + exist *Term + multiple bool +} + +var ttrPool = sync.Pool{ + New: func() any { + return newTrieTraversalResult() + }, +} + +func newTrieTraversalResult() *trieTraversalResult { + return &trieTraversalResult{ + unordered: map[int][]*ruleNode{}, + } +} + +func (tr *trieTraversalResult) Add(t *trieNode) { + for _, node := range t.rules { + root := node.prio[0] + nodes, ok := tr.unordered[root] + if !ok { + tr.ordering = append(tr.ordering, root) + } + tr.unordered[root] = append(nodes, node) + } + if 
t.multiple { + tr.multiple = true + } + if tr.multiple || t.value == nil { + return + } + if t.value.IsGround() && tr.exist == nil || tr.exist.Equal(t.value) { + tr.exist = t.value + return + } + tr.multiple = true +} + +type trieNode struct { + ref Ref + mappers []*valueMapper + next *trieNode + any *trieNode + undefined *trieNode + scalars *util.HasherMap[Value, *trieNode] + array *trieNode + rules []*ruleNode + value *Term + multiple bool +} + +func (node *trieNode) String() string { + var flags []string + flags = append(flags, fmt.Sprintf("self:%p", node)) + if len(node.ref) > 0 { + flags = append(flags, node.ref.String()) + } + if node.next != nil { + flags = append(flags, fmt.Sprintf("next:%p", node.next)) + } + if node.any != nil { + flags = append(flags, fmt.Sprintf("any:%p", node.any)) + } + if node.undefined != nil { + flags = append(flags, fmt.Sprintf("undefined:%p", node.undefined)) + } + if node.array != nil { + flags = append(flags, fmt.Sprintf("array:%p", node.array)) + } + if node.scalars.Len() > 0 { + buf := make([]string, 0, node.scalars.Len()) + node.scalars.Iter(func(key Value, val *trieNode) bool { + buf = append(buf, fmt.Sprintf("scalar(%v):%p", key, val)) + return false + }) + sort.Strings(buf) + flags = append(flags, strings.Join(buf, " ")) + } + if len(node.rules) > 0 { + flags = append(flags, fmt.Sprintf("%d rule(s)", len(node.rules))) + } + if len(node.mappers) > 0 { + flags = append(flags, fmt.Sprintf("%d mapper(s)", len(node.mappers))) + } + if node.value != nil { + flags = append(flags, "value exists") + } + return strings.Join(flags, " ") +} + +func (node *trieNode) append(prio [2]int, rule *Rule) { + node.rules = append(node.rules, &ruleNode{prio, rule}) + + if node.value != nil && rule.Head.Value != nil && !node.value.Equal(rule.Head.Value) { + node.multiple = true + } + + if node.value == nil && rule.Head.DocKind() == CompleteDoc { + node.value = rule.Head.Value + } +} + +type ruleNode struct { + prio [2]int + rule *Rule +} + +func newTrieNodeImpl() *trieNode { + return &trieNode{ + scalars: util.NewHasherMap[Value, *trieNode](ValueEqual), + } +} + +func (node *trieNode) Do(walker trieWalker) { + next := walker.Do(node) + if next == nil { + return + } + if node.any != nil { + node.any.Do(next) + } + if node.undefined != nil { + node.undefined.Do(next) + } + + node.scalars.Iter(func(_ Value, child *trieNode) bool { + child.Do(next) + return false + }) + + if node.array != nil { + node.array.Do(next) + } + if node.next != nil { + node.next.Do(next) + } +} + +func (node *trieNode) Insert(ref Ref, value Value, mapper *valueMapper) *trieNode { + + if node.next == nil { + node.next = newTrieNodeImpl() + node.next.ref = ref + } + + if mapper != nil { + node.next.addMapper(mapper) + } + + return node.next.insertValue(value) +} + +func (node *trieNode) Traverse(resolver ValueResolver, tr *trieTraversalResult) error { + + if node == nil { + return nil + } + + tr.Add(node) + + return node.next.traverse(resolver, tr) +} + +func (node *trieNode) addMapper(mapper *valueMapper) { + for i := range node.mappers { + if node.mappers[i].Key == mapper.Key { + return + } + } + node.mappers = append(node.mappers, mapper) +} + +func (node *trieNode) insertValue(value Value) *trieNode { + + switch value := value.(type) { + case nil: + if node.undefined == nil { + node.undefined = newTrieNodeImpl() + } + return node.undefined + case Var: + if node.any == nil { + node.any = newTrieNodeImpl() + } + return node.any + case Null, Boolean, Number, String: + child, ok := 
node.scalars.Get(value)
+		if !ok {
+			child = newTrieNodeImpl()
+			node.scalars.Put(value, child)
+		}
+		return child
+	case *Array:
+		if node.array == nil {
+			node.array = newTrieNodeImpl()
+		}
+		return node.array.insertArray(value)
+	}
+
+	panic("illegal value")
+}
+
+func (node *trieNode) insertArray(arr *Array) *trieNode {
+
+	if arr.Len() == 0 {
+		return node
+	}
+
+	switch head := arr.Elem(0).Value.(type) {
+	case Var:
+		if node.any == nil {
+			node.any = newTrieNodeImpl()
+		}
+		return node.any.insertArray(arr.Slice(1, -1))
+	case Null, Boolean, Number, String:
+		child, ok := node.scalars.Get(head)
+		if !ok {
+			child = newTrieNodeImpl()
+			node.scalars.Put(head, child)
+		}
+		return child.insertArray(arr.Slice(1, -1))
+	}
+
+	panic("illegal value")
+}
+
+func (node *trieNode) traverse(resolver ValueResolver, tr *trieTraversalResult) error {
+
+	if node == nil {
+		return nil
+	}
+
+	v, err := resolver.Resolve(node.ref)
+	if err != nil {
+		if IsUnknownValueErr(err) {
+			return node.traverseUnknown(resolver, tr)
+		}
+		return err
+	}
+
+	if node.undefined != nil {
+		err = node.undefined.Traverse(resolver, tr)
+		if err != nil {
+			return err
+		}
+	}
+
+	if v == nil {
+		return nil
+	}
+
+	if node.any != nil {
+		err = node.any.Traverse(resolver, tr)
+		if err != nil {
+			return err
+		}
+	}
+
+	if err := node.traverseValue(resolver, tr, v); err != nil {
+		return err
+	}
+
+	for i := range node.mappers {
+		if err := node.traverseValue(resolver, tr, node.mappers[i].MapValue(v)); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (node *trieNode) traverseValue(resolver ValueResolver, tr *trieTraversalResult, value Value) error {
+
+	switch value := value.(type) {
+	case *Array:
+		if node.array == nil {
+			return nil
+		}
+		return node.array.traverseArray(resolver, tr, value)
+
+	case Null, Boolean, Number, String:
+		child, ok := node.scalars.Get(value)
+		if !ok {
+			return nil
+		}
+		return child.Traverse(resolver, tr)
+	}
+
+	return nil
+}
+
+func (node *trieNode) traverseArray(resolver ValueResolver, tr *trieTraversalResult, arr *Array) error {
+
+	if arr.Len() == 0 {
+		return node.Traverse(resolver, tr)
+	}
+
+	if node.any != nil {
+		err := node.any.traverseArray(resolver, tr, arr.Slice(1, -1))
+		if err != nil {
+			return err
+		}
+	}
+
+	head := arr.Elem(0).Value
+
+	if !IsScalar(head) {
+		return nil
+	}
+
+	switch head := head.(type) {
+	case Null, Boolean, Number, String:
+		child, ok := node.scalars.Get(head)
+		if !ok {
+			return nil
+		}
+		return child.traverseArray(resolver, tr, arr.Slice(1, -1))
+	}
+
+	panic("illegal value")
+}
+
+func (node *trieNode) traverseUnknown(resolver ValueResolver, tr *trieTraversalResult) error {
+
+	if node == nil {
+		return nil
+	}
+
+	if err := node.Traverse(resolver, tr); err != nil {
+		return err
+	}
+
+	if err := node.undefined.traverseUnknown(resolver, tr); err != nil {
+		return err
+	}
+
+	if err := node.any.traverseUnknown(resolver, tr); err != nil {
+		return err
+	}
+
+	if err := node.array.traverseUnknown(resolver, tr); err != nil {
+		return err
+	}
+
+	var iterErr error
+	node.scalars.Iter(func(_ Value, child *trieNode) bool {
+		// Capture the child error so it is not silently dropped; stop on first error.
+		iterErr = child.traverseUnknown(resolver, tr)
+		return iterErr != nil
+	})
+
+	return iterErr
+}
+
+// If term `a` is one of the function's operands, we store a Ref: `args[0]`
+// for the argument number. So for `f(x, y) { x = 10; y = 12 }`, we'll
+// bind `args[0]` and `args[1]` to this rule when called for (x=10) and
+// (y=12) respectively.
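+//
+// A hedged sketch of the result shape (names from this file): for the rule
+// `f(x) { x = 10 }`, the operand pair (x, 10) maps to roughly
+//
+//	&refindex{Ref: Ref{FunctionArgRootDocument, InternedIntNumberTerm(0)}, Value: Number("10")}
+//
+// which lets the indexer prune f when its first argument cannot equal 10.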
+func eqOperandsToRefAndValue(isVirtual func(Ref) bool, args []*Term, a, b *Term) (*refindex, bool) { + switch v := a.Value.(type) { + case Var: + for i, arg := range args { + if arg.Value.Compare(a.Value) == 0 { + if bval, ok := indexValue(b); ok { + return &refindex{Ref: Ref{FunctionArgRootDocument, InternedIntNumberTerm(i)}, Value: bval}, true + } + } + } + case Ref: + if !RootDocumentNames.Contains(v[0]) { + return nil, false + } + if isVirtual(v) { + return nil, false + } + if v.IsNested() || !v.IsGround() { + return nil, false + } + if bval, ok := indexValue(b); ok { + return &refindex{Ref: v, Value: bval}, true + } + } + return nil, false +} + +func indexValue(b *Term) (Value, bool) { + switch b := b.Value.(type) { + case Null, Boolean, Number, String, Var: + return b, true + case *Array: + stop := false + first := true + vis := NewGenericVisitor(func(x interface{}) bool { + if first { + first = false + return false + } + switch x.(type) { + // No nested structures or values that require evaluation (other than var). + case *Array, Object, Set, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Ref: + stop = true + } + return stop + }) + vis.Walk(b) + if !stop { + return b, true + } + } + + return nil, false +} + +func globDelimiterToString(delim *Term) (string, bool) { + + arr, ok := delim.Value.(*Array) + if !ok { + return "", false + } + + var result string + + if arr.Len() == 0 { + result = "." + } else { + for i := range arr.Len() { + term := arr.Elem(i) + s, ok := term.Value.(String) + if !ok { + return "", false + } + result += string(s) + } + } + + return result, true +} + +var globwildcard = VarTerm("$globwildcard") + +func globPatternToArray(pattern *Term, delim string) *Term { + + s, ok := pattern.Value.(String) + if !ok { + return nil + } + + parts := splitStringEscaped(string(s), delim) + arr := make([]*Term, len(parts)) + + for i := range parts { + if parts[i] == "*" { + arr[i] = globwildcard + } else { + var escaped bool + for _, c := range parts[i] { + if c == '\\' { + escaped = !escaped + continue + } + if !escaped { + switch c { + case '[', '?', '{', '*': + // TODO(tsandall): super glob and character pattern + // matching not supported yet. + return nil + } + } + escaped = false + } + arr[i] = StringTerm(parts[i]) + } + } + + return NewTerm(NewArray(arr...)) +} + +// splits s on characters in delim except if delim characters have been escaped +// with reverse solidus. +func splitStringEscaped(s string, delim string) []string { + + var last, curr int + var escaped bool + var result []string + + for ; curr < len(s); curr++ { + if s[curr] == '\\' || escaped { + escaped = !escaped + continue + } + if strings.ContainsRune(delim, rune(s[curr])) { + result = append(result, s[last:curr]) + last = curr + 1 + } + } + + result = append(result, s[last:]) + + return result +} + +func stringSliceToArray(s []string) *Array { + arr := make([]*Term, len(s)) + for i, v := range s { + arr[i] = StringTerm(v) + } + return NewArray(arr...) 
+} diff --git a/vendor/github.com/open-policy-agent/opa/ast/internal/scanner/scanner.go b/vendor/github.com/open-policy-agent/opa/v1/ast/internal/scanner/scanner.go similarity index 91% rename from vendor/github.com/open-policy-agent/opa/ast/internal/scanner/scanner.go rename to vendor/github.com/open-policy-agent/opa/v1/ast/internal/scanner/scanner.go index a0200ac18dd..d70253bc5c8 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/internal/scanner/scanner.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/internal/scanner/scanner.go @@ -10,7 +10,8 @@ import ( "unicode" "unicode/utf8" - "github.com/open-policy-agent/opa/ast/internal/tokens" + "github.com/open-policy-agent/opa/v1/ast/internal/tokens" + "github.com/open-policy-agent/opa/v1/util" ) const bom = 0xFEFF @@ -18,31 +19,31 @@ const bom = 0xFEFF // Scanner is used to tokenize an input stream of // Rego source code. type Scanner struct { + keywords map[string]tokens.Token + bs []byte + errors []Error + tabs []int offset int row int col int - bs []byte - curr rune width int - errors []Error - keywords map[string]tokens.Token - tabs []int + curr rune regoV1Compatible bool } // Error represents a scanner error. type Error struct { - Pos Position Message string + Pos Position } // Position represents a point in the scanned source code. type Position struct { + Tabs []int // positions of any tabs preceding Col Offset int // start offset in bytes End int // end offset in bytes Row int // line number computed in bytes Col int // column number computed in bytes - Tabs []int // positions of any tabs preceding Col } // New returns an initialized scanner that will scan @@ -100,8 +101,8 @@ func (s *Scanner) Keyword(lit string) tokens.Token { func (s *Scanner) AddKeyword(kw string, tok tokens.Token) { s.keywords[kw] = tok - switch tok { - case tokens.Every: // importing 'every' means also importing 'in' + if tok == tokens.Every { + // importing 'every' means also importing 'in' s.keywords["in"] = tokens.In } } @@ -164,7 +165,21 @@ func (s *Scanner) Scan() (tokens.Token, Position, string, []Error) { var lit string if s.isWhitespace() { - lit = string(s.curr) + // string(rune) is an unnecessary heap allocation in this case as we know all + // the possible whitespace values, and can simply translate to string ourselves + switch s.curr { + case ' ': + lit = " " + case '\t': + lit = "\t" + case '\n': + lit = "\n" + case '\r': + lit = "\r" + default: + // unreachable unless isWhitespace changes + lit = string(s.curr) + } s.next() tok = tokens.Whitespace } else if isLetter(s.curr) { @@ -270,7 +285,8 @@ func (s *Scanner) scanIdentifier() string { for isLetter(s.curr) || isDigit(s.curr) { s.next() } - return string(s.bs[start : s.offset-1]) + + return util.ByteSliceToString(s.bs[start : s.offset-1]) } func (s *Scanner) scanNumber() string { @@ -321,7 +337,7 @@ func (s *Scanner) scanNumber() string { } } - return string(s.bs[start : s.offset-1]) + return util.ByteSliceToString(s.bs[start : s.offset-1]) } func (s *Scanner) scanString() string { @@ -355,7 +371,7 @@ func (s *Scanner) scanString() string { } } - return string(s.bs[start : s.offset-1]) + return util.ByteSliceToString(s.bs[start : s.offset-1]) } func (s *Scanner) scanRawString() string { @@ -370,7 +386,8 @@ func (s *Scanner) scanRawString() string { break } } - return string(s.bs[start : s.offset-1]) + + return util.ByteSliceToString(s.bs[start : s.offset-1]) } func (s *Scanner) scanComment() string { @@ -381,9 +398,10 @@ func (s *Scanner) scanComment() string { end := s.offset - 1 
 	// Trim carriage returns that precede the newline
 	if s.offset > 1 && s.bs[s.offset-2] == '\r' {
-		end = end - 1
+		end -= 1
 	}
-	return string(s.bs[start:end])
+
+	return util.ByteSliceToString(s.bs[start:end])
 }
 
 func (s *Scanner) next() {
@@ -413,7 +431,7 @@
 	if s.curr == '\n' {
 		s.row++
 		s.col = 0
-		s.tabs = []int{}
+		s.tabs = s.tabs[:0]
 	} else {
 		s.col++
 		if s.curr == '\t' {
diff --git a/vendor/github.com/open-policy-agent/opa/ast/internal/tokens/tokens.go b/vendor/github.com/open-policy-agent/opa/v1/ast/internal/tokens/tokens.go
similarity index 93%
rename from vendor/github.com/open-policy-agent/opa/ast/internal/tokens/tokens.go
rename to vendor/github.com/open-policy-agent/opa/v1/ast/internal/tokens/tokens.go
index 623ed7ed21d..4033ba81ae6 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/internal/tokens/tokens.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/internal/tokens/tokens.go
@@ -4,12 +4,14 @@
 
 package tokens
 
+import "maps"
+
 // Token represents a single Rego source code token
 // for use by the Parser.
-type Token int
+type Token uint8
 
 func (t Token) String() string {
-	if t < 0 || int(t) >= len(strings) {
+	if int(t) >= len(strings) {
 		return "unknown"
 	}
 	return strings[t]
@@ -137,11 +139,7 @@ var keywords = map[string]Token{
 
 // Keywords returns a copy of the default string -> Token keyword map.
 func Keywords() map[string]Token {
-	cpy := make(map[string]Token, len(keywords))
-	for k, v := range keywords {
-		cpy[k] = v
-	}
-	return cpy
+	return maps.Clone(keywords)
 }
 
 // IsKeyword returns if a token is a keyword
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/interning.go b/vendor/github.com/open-policy-agent/opa/v1/ast/interning.go
new file mode 100644
index 00000000000..012cffb9a92
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/interning.go
@@ -0,0 +1,1120 @@
+// Copyright 2024 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import "strconv"
+
+// NOTE! Great care must be taken **not** to modify the terms returned
+// from these functions, as they are shared across all callers.
+
+var (
+	booleanTrueTerm  = &Term{Value: Boolean(true)}
+	booleanFalseTerm = &Term{Value: Boolean(false)}
+
+	// since this is by far the most common negative number
+	minusOneTerm = &Term{Value: Number("-1")}
+
+	InternedNullTerm = &Term{Value: Null{}}
+
+	InternedEmptyString = StringTerm("")
+	InternedEmptyObject = ObjectTerm()
+)
+
+// InternedBooleanTerm returns an interned term with the given boolean value.
+func InternedBooleanTerm(b bool) *Term {
+	if b {
+		return booleanTrueTerm
+	}
+
+	return booleanFalseTerm
+}
+
+// InternedIntNumberTerm returns a term with the given integer value. The term is
+// cached for values from -1 to 512; for values outside that range, this function
+// is equivalent to ast.IntNumberTerm.
+func InternedIntNumberTerm(i int) *Term {
+	if i >= 0 && i < len(intNumberTerms) {
+		return intNumberTerms[i]
+	}
+
+	if i == -1 {
+		return minusOneTerm
+	}
+
+	return &Term{Value: Number(strconv.Itoa(i))}
+}
+
+// InternedIntNumberTermFromString returns a term with the given integer value if the string
+// maps to an interned term. If the string does not map to an interned term, nil is
+// returned.
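+//
+// For example: InternedIntNumberTermFromString("42") returns the same *Term
+// as InternedIntNumberTerm(42), while an out-of-range string such as "1000"
+// yields nil.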
+func InternedIntNumberTermFromString(s string) *Term { + if term, ok := stringToIntNumberTermMap[s]; ok { + return term + } + + return nil +} + +// HasInternedIntNumberTerm returns true if the given integer value maps to an interned +// term, otherwise false. +func HasInternedIntNumberTerm(i int) bool { + return i >= -1 && i < len(intNumberTerms) +} + +func InternedStringTerm(s string) *Term { + if term, ok := internedStringTerms[s]; ok { + return term + } + + return StringTerm(s) +} + +var internedStringTerms = map[string]*Term{ + "": InternedEmptyString, + "0": StringTerm("0"), + "1": StringTerm("1"), + "2": StringTerm("2"), + "3": StringTerm("3"), + "4": StringTerm("4"), + "5": StringTerm("5"), + "6": StringTerm("6"), + "7": StringTerm("7"), + "8": StringTerm("8"), + "9": StringTerm("9"), + "10": StringTerm("10"), +} + +var stringToIntNumberTermMap = map[string]*Term{ + "-1": minusOneTerm, + "0": intNumberTerms[0], + "1": intNumberTerms[1], + "2": intNumberTerms[2], + "3": intNumberTerms[3], + "4": intNumberTerms[4], + "5": intNumberTerms[5], + "6": intNumberTerms[6], + "7": intNumberTerms[7], + "8": intNumberTerms[8], + "9": intNumberTerms[9], + "10": intNumberTerms[10], + "11": intNumberTerms[11], + "12": intNumberTerms[12], + "13": intNumberTerms[13], + "14": intNumberTerms[14], + "15": intNumberTerms[15], + "16": intNumberTerms[16], + "17": intNumberTerms[17], + "18": intNumberTerms[18], + "19": intNumberTerms[19], + "20": intNumberTerms[20], + "21": intNumberTerms[21], + "22": intNumberTerms[22], + "23": intNumberTerms[23], + "24": intNumberTerms[24], + "25": intNumberTerms[25], + "26": intNumberTerms[26], + "27": intNumberTerms[27], + "28": intNumberTerms[28], + "29": intNumberTerms[29], + "30": intNumberTerms[30], + "31": intNumberTerms[31], + "32": intNumberTerms[32], + "33": intNumberTerms[33], + "34": intNumberTerms[34], + "35": intNumberTerms[35], + "36": intNumberTerms[36], + "37": intNumberTerms[37], + "38": intNumberTerms[38], + "39": intNumberTerms[39], + "40": intNumberTerms[40], + "41": intNumberTerms[41], + "42": intNumberTerms[42], + "43": intNumberTerms[43], + "44": intNumberTerms[44], + "45": intNumberTerms[45], + "46": intNumberTerms[46], + "47": intNumberTerms[47], + "48": intNumberTerms[48], + "49": intNumberTerms[49], + "50": intNumberTerms[50], + "51": intNumberTerms[51], + "52": intNumberTerms[52], + "53": intNumberTerms[53], + "54": intNumberTerms[54], + "55": intNumberTerms[55], + "56": intNumberTerms[56], + "57": intNumberTerms[57], + "58": intNumberTerms[58], + "59": intNumberTerms[59], + "60": intNumberTerms[60], + "61": intNumberTerms[61], + "62": intNumberTerms[62], + "63": intNumberTerms[63], + "64": intNumberTerms[64], + "65": intNumberTerms[65], + "66": intNumberTerms[66], + "67": intNumberTerms[67], + "68": intNumberTerms[68], + "69": intNumberTerms[69], + "70": intNumberTerms[70], + "71": intNumberTerms[71], + "72": intNumberTerms[72], + "73": intNumberTerms[73], + "74": intNumberTerms[74], + "75": intNumberTerms[75], + "76": intNumberTerms[76], + "77": intNumberTerms[77], + "78": intNumberTerms[78], + "79": intNumberTerms[79], + "80": intNumberTerms[80], + "81": intNumberTerms[81], + "82": intNumberTerms[82], + "83": intNumberTerms[83], + "84": intNumberTerms[84], + "85": intNumberTerms[85], + "86": intNumberTerms[86], + "87": intNumberTerms[87], + "88": intNumberTerms[88], + "89": intNumberTerms[89], + "90": intNumberTerms[90], + "91": intNumberTerms[91], + "92": intNumberTerms[92], + "93": intNumberTerms[93], + "94": intNumberTerms[94], + "95": 
intNumberTerms[95], + "96": intNumberTerms[96], + "97": intNumberTerms[97], + "98": intNumberTerms[98], + "99": intNumberTerms[99], + "100": intNumberTerms[100], + "101": intNumberTerms[101], + "102": intNumberTerms[102], + "103": intNumberTerms[103], + "104": intNumberTerms[104], + "105": intNumberTerms[105], + "106": intNumberTerms[106], + "107": intNumberTerms[107], + "108": intNumberTerms[108], + "109": intNumberTerms[109], + "110": intNumberTerms[110], + "111": intNumberTerms[111], + "112": intNumberTerms[112], + "113": intNumberTerms[113], + "114": intNumberTerms[114], + "115": intNumberTerms[115], + "116": intNumberTerms[116], + "117": intNumberTerms[117], + "118": intNumberTerms[118], + "119": intNumberTerms[119], + "120": intNumberTerms[120], + "121": intNumberTerms[121], + "122": intNumberTerms[122], + "123": intNumberTerms[123], + "124": intNumberTerms[124], + "125": intNumberTerms[125], + "126": intNumberTerms[126], + "127": intNumberTerms[127], + "128": intNumberTerms[128], + "129": intNumberTerms[129], + "130": intNumberTerms[130], + "131": intNumberTerms[131], + "132": intNumberTerms[132], + "133": intNumberTerms[133], + "134": intNumberTerms[134], + "135": intNumberTerms[135], + "136": intNumberTerms[136], + "137": intNumberTerms[137], + "138": intNumberTerms[138], + "139": intNumberTerms[139], + "140": intNumberTerms[140], + "141": intNumberTerms[141], + "142": intNumberTerms[142], + "143": intNumberTerms[143], + "144": intNumberTerms[144], + "145": intNumberTerms[145], + "146": intNumberTerms[146], + "147": intNumberTerms[147], + "148": intNumberTerms[148], + "149": intNumberTerms[149], + "150": intNumberTerms[150], + "151": intNumberTerms[151], + "152": intNumberTerms[152], + "153": intNumberTerms[153], + "154": intNumberTerms[154], + "155": intNumberTerms[155], + "156": intNumberTerms[156], + "157": intNumberTerms[157], + "158": intNumberTerms[158], + "159": intNumberTerms[159], + "160": intNumberTerms[160], + "161": intNumberTerms[161], + "162": intNumberTerms[162], + "163": intNumberTerms[163], + "164": intNumberTerms[164], + "165": intNumberTerms[165], + "166": intNumberTerms[166], + "167": intNumberTerms[167], + "168": intNumberTerms[168], + "169": intNumberTerms[169], + "170": intNumberTerms[170], + "171": intNumberTerms[171], + "172": intNumberTerms[172], + "173": intNumberTerms[173], + "174": intNumberTerms[174], + "175": intNumberTerms[175], + "176": intNumberTerms[176], + "177": intNumberTerms[177], + "178": intNumberTerms[178], + "179": intNumberTerms[179], + "180": intNumberTerms[180], + "181": intNumberTerms[181], + "182": intNumberTerms[182], + "183": intNumberTerms[183], + "184": intNumberTerms[184], + "185": intNumberTerms[185], + "186": intNumberTerms[186], + "187": intNumberTerms[187], + "188": intNumberTerms[188], + "189": intNumberTerms[189], + "190": intNumberTerms[190], + "191": intNumberTerms[191], + "192": intNumberTerms[192], + "193": intNumberTerms[193], + "194": intNumberTerms[194], + "195": intNumberTerms[195], + "196": intNumberTerms[196], + "197": intNumberTerms[197], + "198": intNumberTerms[198], + "199": intNumberTerms[199], + "200": intNumberTerms[200], + "201": intNumberTerms[201], + "202": intNumberTerms[202], + "203": intNumberTerms[203], + "204": intNumberTerms[204], + "205": intNumberTerms[205], + "206": intNumberTerms[206], + "207": intNumberTerms[207], + "208": intNumberTerms[208], + "209": intNumberTerms[209], + "210": intNumberTerms[210], + "211": intNumberTerms[211], + "212": intNumberTerms[212], + "213": intNumberTerms[213], + 
"214": intNumberTerms[214], + "215": intNumberTerms[215], + "216": intNumberTerms[216], + "217": intNumberTerms[217], + "218": intNumberTerms[218], + "219": intNumberTerms[219], + "220": intNumberTerms[220], + "221": intNumberTerms[221], + "222": intNumberTerms[222], + "223": intNumberTerms[223], + "224": intNumberTerms[224], + "225": intNumberTerms[225], + "226": intNumberTerms[226], + "227": intNumberTerms[227], + "228": intNumberTerms[228], + "229": intNumberTerms[229], + "230": intNumberTerms[230], + "231": intNumberTerms[231], + "232": intNumberTerms[232], + "233": intNumberTerms[233], + "234": intNumberTerms[234], + "235": intNumberTerms[235], + "236": intNumberTerms[236], + "237": intNumberTerms[237], + "238": intNumberTerms[238], + "239": intNumberTerms[239], + "240": intNumberTerms[240], + "241": intNumberTerms[241], + "242": intNumberTerms[242], + "243": intNumberTerms[243], + "244": intNumberTerms[244], + "245": intNumberTerms[245], + "246": intNumberTerms[246], + "247": intNumberTerms[247], + "248": intNumberTerms[248], + "249": intNumberTerms[249], + "250": intNumberTerms[250], + "251": intNumberTerms[251], + "252": intNumberTerms[252], + "253": intNumberTerms[253], + "254": intNumberTerms[254], + "255": intNumberTerms[255], + "256": intNumberTerms[256], + "257": intNumberTerms[257], + "258": intNumberTerms[258], + "259": intNumberTerms[259], + "260": intNumberTerms[260], + "261": intNumberTerms[261], + "262": intNumberTerms[262], + "263": intNumberTerms[263], + "264": intNumberTerms[264], + "265": intNumberTerms[265], + "266": intNumberTerms[266], + "267": intNumberTerms[267], + "268": intNumberTerms[268], + "269": intNumberTerms[269], + "270": intNumberTerms[270], + "271": intNumberTerms[271], + "272": intNumberTerms[272], + "273": intNumberTerms[273], + "274": intNumberTerms[274], + "275": intNumberTerms[275], + "276": intNumberTerms[276], + "277": intNumberTerms[277], + "278": intNumberTerms[278], + "279": intNumberTerms[279], + "280": intNumberTerms[280], + "281": intNumberTerms[281], + "282": intNumberTerms[282], + "283": intNumberTerms[283], + "284": intNumberTerms[284], + "285": intNumberTerms[285], + "286": intNumberTerms[286], + "287": intNumberTerms[287], + "288": intNumberTerms[288], + "289": intNumberTerms[289], + "290": intNumberTerms[290], + "291": intNumberTerms[291], + "292": intNumberTerms[292], + "293": intNumberTerms[293], + "294": intNumberTerms[294], + "295": intNumberTerms[295], + "296": intNumberTerms[296], + "297": intNumberTerms[297], + "298": intNumberTerms[298], + "299": intNumberTerms[299], + "300": intNumberTerms[300], + "301": intNumberTerms[301], + "302": intNumberTerms[302], + "303": intNumberTerms[303], + "304": intNumberTerms[304], + "305": intNumberTerms[305], + "306": intNumberTerms[306], + "307": intNumberTerms[307], + "308": intNumberTerms[308], + "309": intNumberTerms[309], + "310": intNumberTerms[310], + "311": intNumberTerms[311], + "312": intNumberTerms[312], + "313": intNumberTerms[313], + "314": intNumberTerms[314], + "315": intNumberTerms[315], + "316": intNumberTerms[316], + "317": intNumberTerms[317], + "318": intNumberTerms[318], + "319": intNumberTerms[319], + "320": intNumberTerms[320], + "321": intNumberTerms[321], + "322": intNumberTerms[322], + "323": intNumberTerms[323], + "324": intNumberTerms[324], + "325": intNumberTerms[325], + "326": intNumberTerms[326], + "327": intNumberTerms[327], + "328": intNumberTerms[328], + "329": intNumberTerms[329], + "330": intNumberTerms[330], + "331": intNumberTerms[331], + "332": 
intNumberTerms[332], + "333": intNumberTerms[333], + "334": intNumberTerms[334], + "335": intNumberTerms[335], + "336": intNumberTerms[336], + "337": intNumberTerms[337], + "338": intNumberTerms[338], + "339": intNumberTerms[339], + "340": intNumberTerms[340], + "341": intNumberTerms[341], + "342": intNumberTerms[342], + "343": intNumberTerms[343], + "344": intNumberTerms[344], + "345": intNumberTerms[345], + "346": intNumberTerms[346], + "347": intNumberTerms[347], + "348": intNumberTerms[348], + "349": intNumberTerms[349], + "350": intNumberTerms[350], + "351": intNumberTerms[351], + "352": intNumberTerms[352], + "353": intNumberTerms[353], + "354": intNumberTerms[354], + "355": intNumberTerms[355], + "356": intNumberTerms[356], + "357": intNumberTerms[357], + "358": intNumberTerms[358], + "359": intNumberTerms[359], + "360": intNumberTerms[360], + "361": intNumberTerms[361], + "362": intNumberTerms[362], + "363": intNumberTerms[363], + "364": intNumberTerms[364], + "365": intNumberTerms[365], + "366": intNumberTerms[366], + "367": intNumberTerms[367], + "368": intNumberTerms[368], + "369": intNumberTerms[369], + "370": intNumberTerms[370], + "371": intNumberTerms[371], + "372": intNumberTerms[372], + "373": intNumberTerms[373], + "374": intNumberTerms[374], + "375": intNumberTerms[375], + "376": intNumberTerms[376], + "377": intNumberTerms[377], + "378": intNumberTerms[378], + "379": intNumberTerms[379], + "380": intNumberTerms[380], + "381": intNumberTerms[381], + "382": intNumberTerms[382], + "383": intNumberTerms[383], + "384": intNumberTerms[384], + "385": intNumberTerms[385], + "386": intNumberTerms[386], + "387": intNumberTerms[387], + "388": intNumberTerms[388], + "389": intNumberTerms[389], + "390": intNumberTerms[390], + "391": intNumberTerms[391], + "392": intNumberTerms[392], + "393": intNumberTerms[393], + "394": intNumberTerms[394], + "395": intNumberTerms[395], + "396": intNumberTerms[396], + "397": intNumberTerms[397], + "398": intNumberTerms[398], + "399": intNumberTerms[399], + "400": intNumberTerms[400], + "401": intNumberTerms[401], + "402": intNumberTerms[402], + "403": intNumberTerms[403], + "404": intNumberTerms[404], + "405": intNumberTerms[405], + "406": intNumberTerms[406], + "407": intNumberTerms[407], + "408": intNumberTerms[408], + "409": intNumberTerms[409], + "410": intNumberTerms[410], + "411": intNumberTerms[411], + "412": intNumberTerms[412], + "413": intNumberTerms[413], + "414": intNumberTerms[414], + "415": intNumberTerms[415], + "416": intNumberTerms[416], + "417": intNumberTerms[417], + "418": intNumberTerms[418], + "419": intNumberTerms[419], + "420": intNumberTerms[420], + "421": intNumberTerms[421], + "422": intNumberTerms[422], + "423": intNumberTerms[423], + "424": intNumberTerms[424], + "425": intNumberTerms[425], + "426": intNumberTerms[426], + "427": intNumberTerms[427], + "428": intNumberTerms[428], + "429": intNumberTerms[429], + "430": intNumberTerms[430], + "431": intNumberTerms[431], + "432": intNumberTerms[432], + "433": intNumberTerms[433], + "434": intNumberTerms[434], + "435": intNumberTerms[435], + "436": intNumberTerms[436], + "437": intNumberTerms[437], + "438": intNumberTerms[438], + "439": intNumberTerms[439], + "440": intNumberTerms[440], + "441": intNumberTerms[441], + "442": intNumberTerms[442], + "443": intNumberTerms[443], + "444": intNumberTerms[444], + "445": intNumberTerms[445], + "446": intNumberTerms[446], + "447": intNumberTerms[447], + "448": intNumberTerms[448], + "449": intNumberTerms[449], + "450": 
intNumberTerms[450], + "451": intNumberTerms[451], + "452": intNumberTerms[452], + "453": intNumberTerms[453], + "454": intNumberTerms[454], + "455": intNumberTerms[455], + "456": intNumberTerms[456], + "457": intNumberTerms[457], + "458": intNumberTerms[458], + "459": intNumberTerms[459], + "460": intNumberTerms[460], + "461": intNumberTerms[461], + "462": intNumberTerms[462], + "463": intNumberTerms[463], + "464": intNumberTerms[464], + "465": intNumberTerms[465], + "466": intNumberTerms[466], + "467": intNumberTerms[467], + "468": intNumberTerms[468], + "469": intNumberTerms[469], + "470": intNumberTerms[470], + "471": intNumberTerms[471], + "472": intNumberTerms[472], + "473": intNumberTerms[473], + "474": intNumberTerms[474], + "475": intNumberTerms[475], + "476": intNumberTerms[476], + "477": intNumberTerms[477], + "478": intNumberTerms[478], + "479": intNumberTerms[479], + "480": intNumberTerms[480], + "481": intNumberTerms[481], + "482": intNumberTerms[482], + "483": intNumberTerms[483], + "484": intNumberTerms[484], + "485": intNumberTerms[485], + "486": intNumberTerms[486], + "487": intNumberTerms[487], + "488": intNumberTerms[488], + "489": intNumberTerms[489], + "490": intNumberTerms[490], + "491": intNumberTerms[491], + "492": intNumberTerms[492], + "493": intNumberTerms[493], + "494": intNumberTerms[494], + "495": intNumberTerms[495], + "496": intNumberTerms[496], + "497": intNumberTerms[497], + "498": intNumberTerms[498], + "499": intNumberTerms[499], + "500": intNumberTerms[500], + "501": intNumberTerms[501], + "502": intNumberTerms[502], + "503": intNumberTerms[503], + "504": intNumberTerms[504], + "505": intNumberTerms[505], + "506": intNumberTerms[506], + "507": intNumberTerms[507], + "508": intNumberTerms[508], + "509": intNumberTerms[509], + "510": intNumberTerms[510], + "511": intNumberTerms[511], + "512": intNumberTerms[512], +} + +var intNumberTerms = [...]*Term{ + {Value: Number("0")}, + {Value: Number("1")}, + {Value: Number("2")}, + {Value: Number("3")}, + {Value: Number("4")}, + {Value: Number("5")}, + {Value: Number("6")}, + {Value: Number("7")}, + {Value: Number("8")}, + {Value: Number("9")}, + {Value: Number("10")}, + {Value: Number("11")}, + {Value: Number("12")}, + {Value: Number("13")}, + {Value: Number("14")}, + {Value: Number("15")}, + {Value: Number("16")}, + {Value: Number("17")}, + {Value: Number("18")}, + {Value: Number("19")}, + {Value: Number("20")}, + {Value: Number("21")}, + {Value: Number("22")}, + {Value: Number("23")}, + {Value: Number("24")}, + {Value: Number("25")}, + {Value: Number("26")}, + {Value: Number("27")}, + {Value: Number("28")}, + {Value: Number("29")}, + {Value: Number("30")}, + {Value: Number("31")}, + {Value: Number("32")}, + {Value: Number("33")}, + {Value: Number("34")}, + {Value: Number("35")}, + {Value: Number("36")}, + {Value: Number("37")}, + {Value: Number("38")}, + {Value: Number("39")}, + {Value: Number("40")}, + {Value: Number("41")}, + {Value: Number("42")}, + {Value: Number("43")}, + {Value: Number("44")}, + {Value: Number("45")}, + {Value: Number("46")}, + {Value: Number("47")}, + {Value: Number("48")}, + {Value: Number("49")}, + {Value: Number("50")}, + {Value: Number("51")}, + {Value: Number("52")}, + {Value: Number("53")}, + {Value: Number("54")}, + {Value: Number("55")}, + {Value: Number("56")}, + {Value: Number("57")}, + {Value: Number("58")}, + {Value: Number("59")}, + {Value: Number("60")}, + {Value: Number("61")}, + {Value: Number("62")}, + {Value: Number("63")}, + {Value: Number("64")}, + {Value: 
Number("65")}, + {Value: Number("66")}, + {Value: Number("67")}, + {Value: Number("68")}, + {Value: Number("69")}, + {Value: Number("70")}, + {Value: Number("71")}, + {Value: Number("72")}, + {Value: Number("73")}, + {Value: Number("74")}, + {Value: Number("75")}, + {Value: Number("76")}, + {Value: Number("77")}, + {Value: Number("78")}, + {Value: Number("79")}, + {Value: Number("80")}, + {Value: Number("81")}, + {Value: Number("82")}, + {Value: Number("83")}, + {Value: Number("84")}, + {Value: Number("85")}, + {Value: Number("86")}, + {Value: Number("87")}, + {Value: Number("88")}, + {Value: Number("89")}, + {Value: Number("90")}, + {Value: Number("91")}, + {Value: Number("92")}, + {Value: Number("93")}, + {Value: Number("94")}, + {Value: Number("95")}, + {Value: Number("96")}, + {Value: Number("97")}, + {Value: Number("98")}, + {Value: Number("99")}, + {Value: Number("100")}, + {Value: Number("101")}, + {Value: Number("102")}, + {Value: Number("103")}, + {Value: Number("104")}, + {Value: Number("105")}, + {Value: Number("106")}, + {Value: Number("107")}, + {Value: Number("108")}, + {Value: Number("109")}, + {Value: Number("110")}, + {Value: Number("111")}, + {Value: Number("112")}, + {Value: Number("113")}, + {Value: Number("114")}, + {Value: Number("115")}, + {Value: Number("116")}, + {Value: Number("117")}, + {Value: Number("118")}, + {Value: Number("119")}, + {Value: Number("120")}, + {Value: Number("121")}, + {Value: Number("122")}, + {Value: Number("123")}, + {Value: Number("124")}, + {Value: Number("125")}, + {Value: Number("126")}, + {Value: Number("127")}, + {Value: Number("128")}, + {Value: Number("129")}, + {Value: Number("130")}, + {Value: Number("131")}, + {Value: Number("132")}, + {Value: Number("133")}, + {Value: Number("134")}, + {Value: Number("135")}, + {Value: Number("136")}, + {Value: Number("137")}, + {Value: Number("138")}, + {Value: Number("139")}, + {Value: Number("140")}, + {Value: Number("141")}, + {Value: Number("142")}, + {Value: Number("143")}, + {Value: Number("144")}, + {Value: Number("145")}, + {Value: Number("146")}, + {Value: Number("147")}, + {Value: Number("148")}, + {Value: Number("149")}, + {Value: Number("150")}, + {Value: Number("151")}, + {Value: Number("152")}, + {Value: Number("153")}, + {Value: Number("154")}, + {Value: Number("155")}, + {Value: Number("156")}, + {Value: Number("157")}, + {Value: Number("158")}, + {Value: Number("159")}, + {Value: Number("160")}, + {Value: Number("161")}, + {Value: Number("162")}, + {Value: Number("163")}, + {Value: Number("164")}, + {Value: Number("165")}, + {Value: Number("166")}, + {Value: Number("167")}, + {Value: Number("168")}, + {Value: Number("169")}, + {Value: Number("170")}, + {Value: Number("171")}, + {Value: Number("172")}, + {Value: Number("173")}, + {Value: Number("174")}, + {Value: Number("175")}, + {Value: Number("176")}, + {Value: Number("177")}, + {Value: Number("178")}, + {Value: Number("179")}, + {Value: Number("180")}, + {Value: Number("181")}, + {Value: Number("182")}, + {Value: Number("183")}, + {Value: Number("184")}, + {Value: Number("185")}, + {Value: Number("186")}, + {Value: Number("187")}, + {Value: Number("188")}, + {Value: Number("189")}, + {Value: Number("190")}, + {Value: Number("191")}, + {Value: Number("192")}, + {Value: Number("193")}, + {Value: Number("194")}, + {Value: Number("195")}, + {Value: Number("196")}, + {Value: Number("197")}, + {Value: Number("198")}, + {Value: Number("199")}, + {Value: Number("200")}, + {Value: Number("201")}, + {Value: Number("202")}, + {Value: 
Number("203")}, + {Value: Number("204")}, + {Value: Number("205")}, + {Value: Number("206")}, + {Value: Number("207")}, + {Value: Number("208")}, + {Value: Number("209")}, + {Value: Number("210")}, + {Value: Number("211")}, + {Value: Number("212")}, + {Value: Number("213")}, + {Value: Number("214")}, + {Value: Number("215")}, + {Value: Number("216")}, + {Value: Number("217")}, + {Value: Number("218")}, + {Value: Number("219")}, + {Value: Number("220")}, + {Value: Number("221")}, + {Value: Number("222")}, + {Value: Number("223")}, + {Value: Number("224")}, + {Value: Number("225")}, + {Value: Number("226")}, + {Value: Number("227")}, + {Value: Number("228")}, + {Value: Number("229")}, + {Value: Number("230")}, + {Value: Number("231")}, + {Value: Number("232")}, + {Value: Number("233")}, + {Value: Number("234")}, + {Value: Number("235")}, + {Value: Number("236")}, + {Value: Number("237")}, + {Value: Number("238")}, + {Value: Number("239")}, + {Value: Number("240")}, + {Value: Number("241")}, + {Value: Number("242")}, + {Value: Number("243")}, + {Value: Number("244")}, + {Value: Number("245")}, + {Value: Number("246")}, + {Value: Number("247")}, + {Value: Number("248")}, + {Value: Number("249")}, + {Value: Number("250")}, + {Value: Number("251")}, + {Value: Number("252")}, + {Value: Number("253")}, + {Value: Number("254")}, + {Value: Number("255")}, + {Value: Number("256")}, + {Value: Number("257")}, + {Value: Number("258")}, + {Value: Number("259")}, + {Value: Number("260")}, + {Value: Number("261")}, + {Value: Number("262")}, + {Value: Number("263")}, + {Value: Number("264")}, + {Value: Number("265")}, + {Value: Number("266")}, + {Value: Number("267")}, + {Value: Number("268")}, + {Value: Number("269")}, + {Value: Number("270")}, + {Value: Number("271")}, + {Value: Number("272")}, + {Value: Number("273")}, + {Value: Number("274")}, + {Value: Number("275")}, + {Value: Number("276")}, + {Value: Number("277")}, + {Value: Number("278")}, + {Value: Number("279")}, + {Value: Number("280")}, + {Value: Number("281")}, + {Value: Number("282")}, + {Value: Number("283")}, + {Value: Number("284")}, + {Value: Number("285")}, + {Value: Number("286")}, + {Value: Number("287")}, + {Value: Number("288")}, + {Value: Number("289")}, + {Value: Number("290")}, + {Value: Number("291")}, + {Value: Number("292")}, + {Value: Number("293")}, + {Value: Number("294")}, + {Value: Number("295")}, + {Value: Number("296")}, + {Value: Number("297")}, + {Value: Number("298")}, + {Value: Number("299")}, + {Value: Number("300")}, + {Value: Number("301")}, + {Value: Number("302")}, + {Value: Number("303")}, + {Value: Number("304")}, + {Value: Number("305")}, + {Value: Number("306")}, + {Value: Number("307")}, + {Value: Number("308")}, + {Value: Number("309")}, + {Value: Number("310")}, + {Value: Number("311")}, + {Value: Number("312")}, + {Value: Number("313")}, + {Value: Number("314")}, + {Value: Number("315")}, + {Value: Number("316")}, + {Value: Number("317")}, + {Value: Number("318")}, + {Value: Number("319")}, + {Value: Number("320")}, + {Value: Number("321")}, + {Value: Number("322")}, + {Value: Number("323")}, + {Value: Number("324")}, + {Value: Number("325")}, + {Value: Number("326")}, + {Value: Number("327")}, + {Value: Number("328")}, + {Value: Number("329")}, + {Value: Number("330")}, + {Value: Number("331")}, + {Value: Number("332")}, + {Value: Number("333")}, + {Value: Number("334")}, + {Value: Number("335")}, + {Value: Number("336")}, + {Value: Number("337")}, + {Value: Number("338")}, + {Value: Number("339")}, + 
{Value: Number("340")}, + {Value: Number("341")}, + {Value: Number("342")}, + {Value: Number("343")}, + {Value: Number("344")}, + {Value: Number("345")}, + {Value: Number("346")}, + {Value: Number("347")}, + {Value: Number("348")}, + {Value: Number("349")}, + {Value: Number("350")}, + {Value: Number("351")}, + {Value: Number("352")}, + {Value: Number("353")}, + {Value: Number("354")}, + {Value: Number("355")}, + {Value: Number("356")}, + {Value: Number("357")}, + {Value: Number("358")}, + {Value: Number("359")}, + {Value: Number("360")}, + {Value: Number("361")}, + {Value: Number("362")}, + {Value: Number("363")}, + {Value: Number("364")}, + {Value: Number("365")}, + {Value: Number("366")}, + {Value: Number("367")}, + {Value: Number("368")}, + {Value: Number("369")}, + {Value: Number("370")}, + {Value: Number("371")}, + {Value: Number("372")}, + {Value: Number("373")}, + {Value: Number("374")}, + {Value: Number("375")}, + {Value: Number("376")}, + {Value: Number("377")}, + {Value: Number("378")}, + {Value: Number("379")}, + {Value: Number("380")}, + {Value: Number("381")}, + {Value: Number("382")}, + {Value: Number("383")}, + {Value: Number("384")}, + {Value: Number("385")}, + {Value: Number("386")}, + {Value: Number("387")}, + {Value: Number("388")}, + {Value: Number("389")}, + {Value: Number("390")}, + {Value: Number("391")}, + {Value: Number("392")}, + {Value: Number("393")}, + {Value: Number("394")}, + {Value: Number("395")}, + {Value: Number("396")}, + {Value: Number("397")}, + {Value: Number("398")}, + {Value: Number("399")}, + {Value: Number("400")}, + {Value: Number("401")}, + {Value: Number("402")}, + {Value: Number("403")}, + {Value: Number("404")}, + {Value: Number("405")}, + {Value: Number("406")}, + {Value: Number("407")}, + {Value: Number("408")}, + {Value: Number("409")}, + {Value: Number("410")}, + {Value: Number("411")}, + {Value: Number("412")}, + {Value: Number("413")}, + {Value: Number("414")}, + {Value: Number("415")}, + {Value: Number("416")}, + {Value: Number("417")}, + {Value: Number("418")}, + {Value: Number("419")}, + {Value: Number("420")}, + {Value: Number("421")}, + {Value: Number("422")}, + {Value: Number("423")}, + {Value: Number("424")}, + {Value: Number("425")}, + {Value: Number("426")}, + {Value: Number("427")}, + {Value: Number("428")}, + {Value: Number("429")}, + {Value: Number("430")}, + {Value: Number("431")}, + {Value: Number("432")}, + {Value: Number("433")}, + {Value: Number("434")}, + {Value: Number("435")}, + {Value: Number("436")}, + {Value: Number("437")}, + {Value: Number("438")}, + {Value: Number("439")}, + {Value: Number("440")}, + {Value: Number("441")}, + {Value: Number("442")}, + {Value: Number("443")}, + {Value: Number("444")}, + {Value: Number("445")}, + {Value: Number("446")}, + {Value: Number("447")}, + {Value: Number("448")}, + {Value: Number("449")}, + {Value: Number("450")}, + {Value: Number("451")}, + {Value: Number("452")}, + {Value: Number("453")}, + {Value: Number("454")}, + {Value: Number("455")}, + {Value: Number("456")}, + {Value: Number("457")}, + {Value: Number("458")}, + {Value: Number("459")}, + {Value: Number("460")}, + {Value: Number("461")}, + {Value: Number("462")}, + {Value: Number("463")}, + {Value: Number("464")}, + {Value: Number("465")}, + {Value: Number("466")}, + {Value: Number("467")}, + {Value: Number("468")}, + {Value: Number("469")}, + {Value: Number("470")}, + {Value: Number("471")}, + {Value: Number("472")}, + {Value: Number("473")}, + {Value: Number("474")}, + {Value: Number("475")}, + {Value: 
Number("476")}, + {Value: Number("477")}, + {Value: Number("478")}, + {Value: Number("479")}, + {Value: Number("480")}, + {Value: Number("481")}, + {Value: Number("482")}, + {Value: Number("483")}, + {Value: Number("484")}, + {Value: Number("485")}, + {Value: Number("486")}, + {Value: Number("487")}, + {Value: Number("488")}, + {Value: Number("489")}, + {Value: Number("490")}, + {Value: Number("491")}, + {Value: Number("492")}, + {Value: Number("493")}, + {Value: Number("494")}, + {Value: Number("495")}, + {Value: Number("496")}, + {Value: Number("497")}, + {Value: Number("498")}, + {Value: Number("499")}, + {Value: Number("500")}, + {Value: Number("501")}, + {Value: Number("502")}, + {Value: Number("503")}, + {Value: Number("504")}, + {Value: Number("505")}, + {Value: Number("506")}, + {Value: Number("507")}, + {Value: Number("508")}, + {Value: Number("509")}, + {Value: Number("510")}, + {Value: Number("511")}, + {Value: Number("512")}, +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/json/json.go b/vendor/github.com/open-policy-agent/opa/v1/ast/json/json.go new file mode 100644 index 00000000000..9081fe7039a --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/json/json.go @@ -0,0 +1,106 @@ +// Copyright 2023 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// This package provides options for JSON marshalling of AST nodes, and location +// data in particular. Since location data occupies a significant portion of the +// AST when included, it is excluded by default. The options provided here allow +// changing that behavior — either for all nodes or for specific types. Since +// JSONMarshaller implementations have access only to the node being marshaled, +// our options are to either attach these settings to *all* nodes in the AST, or +// to provide them via global state. The former is perhaps a little more elegant, +// and is what we went with initially. The cost of attaching these settings to +// every node however turned out to be non-negligible, and given that the number +// of users who have an interest in AST serialization are likely to be few, we +// have since switched to using global state, as provided here. Note that this +// is mostly to provide an equivalent feature to what we had before, should +// anyone depend on that. Users who need fine-grained control over AST +// serialization are recommended to use external libraries for that purpose, +// such as `github.com/json-iterator/go`. +package json + +import "sync" + +// Options defines the options for JSON operations, +// currently only marshaling can be configured +type Options struct { + MarshalOptions MarshalOptions +} + +// MarshalOptions defines the options for JSON marshaling, +// currently only toggling the marshaling of location information is supported +type MarshalOptions struct { + // IncludeLocation toggles the marshaling of location information + IncludeLocation NodeToggle + // IncludeLocationText additionally/optionally includes the text of the location + IncludeLocationText bool + // ExcludeLocationFile additionally/optionally excludes the file of the location + // Note that this is inverted (i.e. 
not "include" as the default needs to remain false) + ExcludeLocationFile bool +} + +// NodeToggle is a generic struct to allow the toggling of +// settings for different ast node types +type NodeToggle struct { + Term bool + Package bool + Comment bool + Import bool + Rule bool + Head bool + Expr bool + SomeDecl bool + Every bool + With bool + Annotations bool + AnnotationsRef bool +} + +// configuredJSONOptions synchronizes access to the global JSON options +type configuredJSONOptions struct { + options Options + lock sync.RWMutex +} + +var options = &configuredJSONOptions{ + options: Defaults(), +} + +// SetOptions sets the global options for marshalling AST nodes to JSON +func SetOptions(opts Options) { + options.lock.Lock() + defer options.lock.Unlock() + options.options = opts +} + +// GetOptions returns (a copy of) the global options for marshalling AST nodes to JSON +func GetOptions() Options { + options.lock.RLock() + defer options.lock.RUnlock() + return options.options +} + +// Defaults returns the default JSON options, which is to exclude location +// information in serialized JSON AST. +func Defaults() Options { + return Options{ + MarshalOptions: MarshalOptions{ + IncludeLocation: NodeToggle{ + Term: false, + Package: false, + Comment: false, + Import: false, + Rule: false, + Head: false, + Expr: false, + SomeDecl: false, + Every: false, + With: false, + Annotations: false, + AnnotationsRef: false, + }, + IncludeLocationText: false, + ExcludeLocationFile: false, + }, + } +} diff --git a/vendor/github.com/open-policy-agent/opa/ast/location/location.go b/vendor/github.com/open-policy-agent/opa/v1/ast/location/location.go similarity index 91% rename from vendor/github.com/open-policy-agent/opa/ast/location/location.go rename to vendor/github.com/open-policy-agent/opa/v1/ast/location/location.go index 92226df3f0b..716aad6930f 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/location/location.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/location/location.go @@ -7,7 +7,7 @@ import ( "errors" "fmt" - astJSON "github.com/open-policy-agent/opa/ast/json" + astJSON "github.com/open-policy-agent/opa/v1/ast/json" ) // Location records a position in source code @@ -18,9 +18,6 @@ type Location struct { Col int `json:"col"` // The column in the row. Offset int `json:"-"` // The byte offset for the location in the source. - // JSONOptions specifies options for marshaling and unmarshalling of locations - JSONOptions astJSON.Options - Tabs []int `json:"-"` // The column offsets of tabs in the source. 
} @@ -98,7 +95,8 @@ func (loc *Location) Compare(other *Location) int { func (loc *Location) MarshalJSON() ([]byte, error) { // structs are used here to preserve the field ordering of the original Location struct - if loc.JSONOptions.MarshalOptions.ExcludeLocationFile { + jsonOptions := astJSON.GetOptions().MarshalOptions + if jsonOptions.ExcludeLocationFile { data := struct { Row int `json:"row"` Col int `json:"col"` @@ -108,7 +106,7 @@ func (loc *Location) MarshalJSON() ([]byte, error) { Col: loc.Col, } - if loc.JSONOptions.MarshalOptions.IncludeLocationText { + if jsonOptions.IncludeLocationText { data.Text = loc.Text } @@ -126,7 +124,7 @@ func (loc *Location) MarshalJSON() ([]byte, error) { File: loc.File, } - if loc.JSONOptions.MarshalOptions.IncludeLocationText { + if jsonOptions.IncludeLocationText { data.Text = loc.Text } diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/map.go b/vendor/github.com/open-policy-agent/opa/v1/ast/map.go new file mode 100644 index 00000000000..d0aa43755f9 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/map.go @@ -0,0 +1,108 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "encoding/json" + + "github.com/open-policy-agent/opa/v1/util" +) + +// ValueMap represents a key/value map between AST term values. Any type of term +// can be used as a key in the map. +type ValueMap struct { + hashMap *util.TypedHashMap[Value, Value] +} + +// NewValueMap returns a new ValueMap. +func NewValueMap() *ValueMap { + return &ValueMap{ + hashMap: util.NewTypedHashMap(ValueEqual, ValueEqual, Value.Hash, Value.Hash, nil), + } +} + +// MarshalJSON provides a custom marshaller for the ValueMap which +// will include the key, value, and value type. +func (vs *ValueMap) MarshalJSON() ([]byte, error) { + var tmp []map[string]interface{} + vs.Iter(func(k Value, v Value) bool { + tmp = append(tmp, map[string]interface{}{ + "name": k.String(), + "type": ValueName(v), + "value": v, + }) + return false + }) + return json.Marshal(tmp) +} + +// Equal returns true if this ValueMap equals the other. +func (vs *ValueMap) Equal(other *ValueMap) bool { + if vs == nil { + return other == nil || other.Len() == 0 + } + if other == nil { + return vs.Len() == 0 + } + return vs.hashMap.Equal(other.hashMap) +} + +// Len returns the number of elements in the map. +func (vs *ValueMap) Len() int { + if vs == nil { + return 0 + } + return vs.hashMap.Len() +} + +// Get returns the value in the map for k. +func (vs *ValueMap) Get(k Value) Value { + if vs != nil { + if v, ok := vs.hashMap.Get(k); ok { + return v + } + } + return nil +} + +// Hash returns a hash code for this ValueMap. +func (vs *ValueMap) Hash() int { + if vs == nil { + return 0 + } + return vs.hashMap.Hash() +} + +// Iter calls the iter function for each key/value pair in the map. If the iter +// function returns true, iteration stops. +func (vs *ValueMap) Iter(iter func(Value, Value) bool) bool { + if vs == nil { + return false + } + return vs.hashMap.Iter(iter) +} + +// Put inserts a key k into the map with value v. +func (vs *ValueMap) Put(k, v Value) { + if vs == nil { + panic("put on nil value map") + } + vs.hashMap.Put(k, v) +} + +// Delete removes a key k from the map. 
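Editor's note: a short usage sketch of the ValueMap API added above, for readers skimming this vendored file (the import path is the v1 ast package this file belongs to; the surrounding program is hypothetical):

    package main

    import "github.com/open-policy-agent/opa/v1/ast"

    func main() {
        vm := ast.NewValueMap()
        vm.Put(ast.String("region"), ast.String("eu"))

        // Get returns nil on a miss rather than an ok-bool, and is safe to
        // call on a nil *ValueMap; Put, by contrast, panics on nil.
        if v := vm.Get(ast.String("region")); v != nil {
            _ = v
        }
    }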
+func (vs *ValueMap) Delete(k Value) { + if vs == nil { + return + } + vs.hashMap.Delete(k) +} + +func (vs *ValueMap) String() string { + if vs == nil { + return "{}" + } + return vs.hashMap.String() +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/parser.go b/vendor/github.com/open-policy-agent/opa/v1/ast/parser.go new file mode 100644 index 00000000000..66779b8d756 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/parser.go @@ -0,0 +1,2773 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "math/big" + "net/url" + "regexp" + "sort" + "strconv" + "strings" + "unicode/utf8" + + "gopkg.in/yaml.v3" + + "github.com/open-policy-agent/opa/v1/ast/internal/scanner" + "github.com/open-policy-agent/opa/v1/ast/internal/tokens" + astJSON "github.com/open-policy-agent/opa/v1/ast/json" + "github.com/open-policy-agent/opa/v1/ast/location" +) + +var RegoV1CompatibleRef = Ref{VarTerm("rego"), StringTerm("v1")} + +// RegoVersion defines the Rego syntax requirements for a module. +type RegoVersion int + +const DefaultRegoVersion = RegoV1 + +const ( + RegoUndefined RegoVersion = iota + // RegoV0 is the default, original Rego syntax. + RegoV0 + // RegoV0CompatV1 requires modules to comply with both the RegoV0 and RegoV1 syntax (as when 'rego.v1' is imported in a module). + // Shortly, RegoV1 compatibility is required, but 'rego.v1' or 'future.keywords' must also be imported. + RegoV0CompatV1 + // RegoV1 is the Rego syntax enforced by OPA 1.0; e.g.: + // future.keywords part of default keyword set, and don't require imports; + // 'if' and 'contains' required in rule heads; + // (some) strict checks on by default. + RegoV1 +) + +func (v RegoVersion) Int() int { + if v == RegoV1 { + return 1 + } + return 0 +} + +func (v RegoVersion) String() string { + switch v { + case RegoV0: + return "v0" + case RegoV1: + return "v1" + case RegoV0CompatV1: + return "v0v1" + default: + return "unknown" + } +} + +func RegoVersionFromInt(i int) RegoVersion { + if i == 1 { + return RegoV1 + } + return RegoV0 +} + +// Note: This state is kept isolated from the parser so that we +// can do efficient shallow copies of these values when doing a +// save() and restore(). +type state struct { + s *scanner.Scanner + lastEnd int + skippedNL bool + tok tokens.Token + tokEnd int + lit string + loc Location + errors Errors + hints []string + comments []*Comment + wildcard int +} + +func (s *state) String() string { + return fmt.Sprintf("", s.s, s.tok, s.lit, s.loc, len(s.errors), len(s.comments)) +} + +func (s *state) Loc() *location.Location { + cpy := s.loc + return &cpy +} + +func (s *state) Text(offset, end int) []byte { + bs := s.s.Bytes() + if offset >= 0 && offset < len(bs) { + if end >= offset && end <= len(bs) { + return bs[offset:end] + } + } + return nil +} + +// Parser is used to parse Rego statements. 
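Editor's note: DefaultRegoVersion above is the headline change of this major bump: when ParserOptions.RegoVersion is left as RegoUndefined, EffectiveRegoVersion now resolves to RegoV1 rather than the old v0 default. A hedged sketch of pinning the pre-1.0 behavior (WithRegoVersion is defined further down in this file):

    package main

    import "github.com/open-policy-agent/opa/v1/ast"

    func main() {
        // Modules written for pre-1.0 syntax must now opt in to v0
        // explicitly instead of relying on the old implicit default.
        p := ast.NewParser().WithRegoVersion(ast.RegoV0)
        _ = p
    }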
+type Parser struct { + r io.Reader + s *state + po ParserOptions + cache parsedTermCache +} + +type parsedTermCacheItem struct { + t *Term + post *state // post is the post-state that's restored on a cache-hit + offset int + next *parsedTermCacheItem +} + +type parsedTermCache struct { + m *parsedTermCacheItem +} + +func (c parsedTermCache) String() string { + s := strings.Builder{} + s.WriteRune('{') + var e *parsedTermCacheItem + for e = c.m; e != nil; e = e.next { + s.WriteString(e.String()) + } + s.WriteRune('}') + return s.String() +} + +func (e *parsedTermCacheItem) String() string { + return fmt.Sprintf("<%d:%v>", e.offset, e.t) +} + +// ParserOptions defines the options for parsing Rego statements. +type ParserOptions struct { + Capabilities *Capabilities + ProcessAnnotation bool + AllFutureKeywords bool + FutureKeywords []string + SkipRules bool + // RegoVersion is the version of Rego to parse for. + RegoVersion RegoVersion + unreleasedKeywords bool // TODO(sr): cleanup +} + +// EffectiveRegoVersion returns the effective RegoVersion to use for parsing. +func (po *ParserOptions) EffectiveRegoVersion() RegoVersion { + if po.RegoVersion == RegoUndefined { + return DefaultRegoVersion + } + return po.RegoVersion +} + +// NewParser creates and initializes a Parser. +func NewParser() *Parser { + p := &Parser{ + s: &state{}, + po: ParserOptions{}, + } + return p +} + +// WithFilename provides the filename for Location details +// on parsed statements. +func (p *Parser) WithFilename(filename string) *Parser { + p.s.loc.File = filename + return p +} + +// WithReader provides the io.Reader that the parser will +// use as its source. +func (p *Parser) WithReader(r io.Reader) *Parser { + p.r = r + return p +} + +// WithProcessAnnotation enables or disables the processing of +// annotations by the Parser +func (p *Parser) WithProcessAnnotation(processAnnotation bool) *Parser { + p.po.ProcessAnnotation = processAnnotation + return p +} + +// WithFutureKeywords enables "future" keywords, i.e., keywords that can +// be imported via +// +// import future.keywords.kw +// import future.keywords.other +// +// but in a more direct way. The equivalent of this import would be +// +// WithFutureKeywords("kw", "other") +func (p *Parser) WithFutureKeywords(kws ...string) *Parser { + p.po.FutureKeywords = kws + return p +} + +// WithAllFutureKeywords enables all "future" keywords, i.e., the +// ParserOption equivalent of +// +// import future.keywords +func (p *Parser) WithAllFutureKeywords(yes bool) *Parser { + p.po.AllFutureKeywords = yes + return p +} + +// withUnreleasedKeywords allows using keywords that haven't surfaced +// as future keywords (see above) yet, but have tests that require +// them to be parsed +func (p *Parser) withUnreleasedKeywords(yes bool) *Parser { + p.po.unreleasedKeywords = yes + return p +} + +// WithCapabilities sets the capabilities structure on the parser. +func (p *Parser) WithCapabilities(c *Capabilities) *Parser { + p.po.Capabilities = c + return p +} + +// WithSkipRules instructs the parser not to attempt to parse Rule statements. +func (p *Parser) WithSkipRules(skip bool) *Parser { + p.po.SkipRules = skip + return p +} + +// WithJSONOptions sets the JSON options on the parser (now a no-op). +// +// Deprecated: Use SetOptions in the json package instead, where a longer description +// of why this is deprecated also can be found. 
+func (p *Parser) WithJSONOptions(_ *astJSON.Options) *Parser { + return p +} + +func (p *Parser) WithRegoVersion(version RegoVersion) *Parser { + p.po.RegoVersion = version + return p +} + +func (p *Parser) parsedTermCacheLookup() (*Term, *state) { + l := p.s.loc.Offset + // stop comparing once the cached offsets are lower than l + for h := p.cache.m; h != nil && h.offset >= l; h = h.next { + if h.offset == l { + return h.t, h.post + } + } + return nil, nil +} + +func (p *Parser) parsedTermCachePush(t *Term, s0 *state) { + s1 := p.save() + o0 := s0.loc.Offset + entry := parsedTermCacheItem{t: t, post: s1, offset: o0} + + // find the first one whose offset is smaller than ours + var e *parsedTermCacheItem + for e = p.cache.m; e != nil; e = e.next { + if e.offset < o0 { + break + } + } + entry.next = e + p.cache.m = &entry +} + +// futureParser returns a shallow copy of `p` with an empty +// cache, and a scanner that knows all future keywords. +// It's used to present hints in errors, when statements would +// only parse successfully if some future keyword is enabled. +func (p *Parser) futureParser() *Parser { + q := *p + q.s = p.save() + q.s.s = p.s.s.WithKeywords(allFutureKeywords) + q.cache = parsedTermCache{} + return &q +} + +// presentParser returns a shallow copy of `p` with an empty +// cache, and a scanner that knows none of the future keywords. +// It is used to successfully parse keyword imports, like +// +// import future.keywords.in +// +// even when the parser has already been informed about the +// future keyword "in". This parser won't error out because +// "in" is an identifier. +func (p *Parser) presentParser() (*Parser, map[string]tokens.Token) { + var cpy map[string]tokens.Token + q := *p + q.s = p.save() + q.s.s, cpy = p.s.s.WithoutKeywords(allFutureKeywords) + q.cache = parsedTermCache{} + return &q, cpy +} + +// Parse will read the Rego source and parse statements and +// comments as they are found. Any errors encountered while +// parsing will be accumulated and returned as a list of Errors. +func (p *Parser) Parse() ([]Statement, []*Comment, Errors) { + + if p.po.Capabilities == nil { + p.po.Capabilities = CapabilitiesForThisVersion(CapabilitiesRegoVersion(p.po.RegoVersion)) + } + + allowedFutureKeywords := map[string]tokens.Token{} + + if p.po.EffectiveRegoVersion() == RegoV1 { + if !p.po.Capabilities.ContainsFeature(FeatureRegoV1) { + return nil, nil, Errors{ + &Error{ + Code: ParseErr, + Message: "illegal capabilities: rego_v1 feature required for parsing v1 Rego", + Location: nil, + }, + } + } + + // rego-v1 includes all v0 future keywords in the default language definition + for k, v := range futureKeywordsV0 { + allowedFutureKeywords[k] = v + } + + for _, kw := range p.po.Capabilities.FutureKeywords { + if tok, ok := futureKeywords[kw]; ok { + allowedFutureKeywords[kw] = tok + } else { + // For sake of error reporting, we still need to check that keywords in capabilities are known in v0 + if _, ok := futureKeywordsV0[kw]; !ok { + return nil, nil, Errors{ + &Error{ + Code: ParseErr, + Message: fmt.Sprintf("illegal capabilities: unknown keyword: %v", kw), + Location: nil, + }, + } + } + } + } + + // Check that explicitly requested future keywords are known. 
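Editor's note: as the v1 branch of Parse below shows, under RegoV1 all future keywords are folded into the base language, so WithFutureKeywords matters mainly when parsing v0 modules. A sketch of that case (the caller is hypothetical):

    package main

    import "github.com/open-policy-agent/opa/v1/ast"

    func main() {
        // A v0 module using `in` must enable the keyword explicitly;
        // under the RegoV1 default this call would be unnecessary.
        p := ast.NewParser().
            WithRegoVersion(ast.RegoV0).
            WithFutureKeywords("in")
        _ = p
    }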
+ for _, kw := range p.po.FutureKeywords { + if _, ok := allowedFutureKeywords[kw]; !ok { + return nil, nil, Errors{ + &Error{ + Code: ParseErr, + Message: fmt.Sprintf("unknown future keyword: %v", kw), + Location: nil, + }, + } + } + } + } else { + for _, kw := range p.po.Capabilities.FutureKeywords { + var ok bool + allowedFutureKeywords[kw], ok = allFutureKeywords[kw] + if !ok { + return nil, nil, Errors{ + &Error{ + Code: ParseErr, + Message: fmt.Sprintf("illegal capabilities: unknown keyword: %v", kw), + Location: nil, + }, + } + } + } + + if p.po.Capabilities.ContainsFeature(FeatureRegoV1) { + // rego-v1 includes all v0 future keywords in the default language definition + for k, v := range futureKeywordsV0 { + allowedFutureKeywords[k] = v + } + } + } + + var err error + p.s.s, err = scanner.New(p.r) + if err != nil { + return nil, nil, Errors{ + &Error{ + Code: ParseErr, + Message: err.Error(), + Location: nil, + }, + } + } + + selected := map[string]tokens.Token{} + if p.po.AllFutureKeywords || p.po.EffectiveRegoVersion() == RegoV1 { + for kw, tok := range allowedFutureKeywords { + selected[kw] = tok + } + } else { + for _, kw := range p.po.FutureKeywords { + tok, ok := allowedFutureKeywords[kw] + if !ok { + return nil, nil, Errors{ + &Error{ + Code: ParseErr, + Message: fmt.Sprintf("unknown future keyword: %v", kw), + Location: nil, + }, + } + } + selected[kw] = tok + } + } + p.s.s = p.s.s.WithKeywords(selected) + + if p.po.EffectiveRegoVersion() == RegoV1 { + for kw, tok := range allowedFutureKeywords { + p.s.s.AddKeyword(kw, tok) + } + } + + // read the first token to initialize the parser + p.scan() + + var stmts []Statement + + // Read from the scanner until the last token is reached or no statements + // can be parsed. Attempt to parse package statements, import statements, + // rule statements, and then body/query statements (in that order). If a + // statement cannot be parsed, restore the parser state before trying the + // next type of statement. If a statement can be parsed, continue from that + // point trying to parse packages, imports, etc. in the same order. 
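Editor's note: for orientation before the statement-dispatch loop below, a minimal end-to-end invocation of this parser (the module text and error handling are illustrative only):

    package main

    import (
        "fmt"
        "strings"

        "github.com/open-policy-agent/opa/v1/ast"
    )

    func main() {
        src := "package demo\n\nallow if input.x == 1\n"
        stmts, comments, errs := ast.NewParser().
            WithReader(strings.NewReader(src)).
            WithFilename("demo.rego").
            Parse()
        if len(errs) > 0 {
            fmt.Println(errs)
            return
        }
        // One package statement plus one rule, and any comments found.
        fmt.Println(len(stmts), len(comments))
    }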
+ for p.s.tok != tokens.EOF { + + s := p.save() + + if pkg := p.parsePackage(); pkg != nil { + stmts = append(stmts, pkg) + continue + } else if len(p.s.errors) > 0 { + break + } + + p.restore(s) + s = p.save() + + if imp := p.parseImport(); imp != nil { + if RegoRootDocument.Equal(imp.Path.Value.(Ref)[0]) { + p.regoV1Import(imp) + } + + if FutureRootDocument.Equal(imp.Path.Value.(Ref)[0]) { + p.futureImport(imp, allowedFutureKeywords) + } + + stmts = append(stmts, imp) + continue + } else if len(p.s.errors) > 0 { + break + } + + p.restore(s) + + if !p.po.SkipRules { + s = p.save() + + if rules := p.parseRules(); rules != nil { + for i := range rules { + stmts = append(stmts, rules[i]) + } + continue + } else if len(p.s.errors) > 0 { + break + } + + p.restore(s) + } + + if body := p.parseQuery(true, tokens.EOF); body != nil { + stmts = append(stmts, body) + continue + } + + break + } + + if p.po.ProcessAnnotation { + stmts = p.parseAnnotations(stmts) + } + + return stmts, p.s.comments, p.s.errors +} + +func (p *Parser) parseAnnotations(stmts []Statement) []Statement { + + annotStmts, errs := parseAnnotations(p.s.comments) + for _, err := range errs { + p.error(err.Location, err.Message) + } + + for _, annotStmt := range annotStmts { + stmts = append(stmts, annotStmt) + } + + return stmts +} + +func parseAnnotations(comments []*Comment) ([]*Annotations, Errors) { + + var hint = []byte("METADATA") + var curr *metadataParser + var blocks []*metadataParser + + for i := range comments { + if curr != nil { + if comments[i].Location.Row == comments[i-1].Location.Row+1 && comments[i].Location.Col == 1 { + curr.Append(comments[i]) + continue + } + curr = nil + } + if bytes.HasPrefix(bytes.TrimSpace(comments[i].Text), hint) { + curr = newMetadataParser(comments[i].Location) + blocks = append(blocks, curr) + } + } + + var stmts []*Annotations + var errs Errors + for _, b := range blocks { + a, err := b.Parse() + if err != nil { + errs = append(errs, &Error{ + Code: ParseErr, + Message: err.Error(), + Location: b.loc, + }) + } else { + stmts = append(stmts, a) + } + } + + return stmts, errs +} + +func (p *Parser) parsePackage() *Package { + + var pkg Package + pkg.SetLoc(p.s.Loc()) + + if p.s.tok != tokens.Package { + return nil + } + + p.scan() + if p.s.tok != tokens.Ident { + p.illegalToken() + return nil + } + + term := p.parseTerm() + + if term != nil { + switch v := term.Value.(type) { + case Var: + pkg.Path = Ref{ + DefaultRootDocument.Copy().SetLocation(term.Location), + StringTerm(string(v)).SetLocation(term.Location), + } + case Ref: + pkg.Path = make(Ref, len(v)+1) + pkg.Path[0] = DefaultRootDocument.Copy().SetLocation(v[0].Location) + first, ok := v[0].Value.(Var) + if !ok { + p.errorf(v[0].Location, "unexpected %v token: expecting var", ValueName(v[0].Value)) + return nil + } + pkg.Path[1] = StringTerm(string(first)).SetLocation(v[0].Location) + for i := 2; i < len(pkg.Path); i++ { + switch v[i-1].Value.(type) { + case String: + pkg.Path[i] = v[i-1] + default: + p.errorf(v[i-1].Location, "unexpected %v token: expecting string", ValueName(v[i-1].Value)) + return nil + } + } + default: + p.illegalToken() + return nil + } + } + + if pkg.Path == nil { + if len(p.s.errors) == 0 { + p.error(p.s.Loc(), "expected path") + } + return nil + } + + return &pkg +} + +func (p *Parser) parseImport() *Import { + + var imp Import + imp.SetLoc(p.s.Loc()) + + if p.s.tok != tokens.Import { + return nil + } + + p.scan() + if p.s.tok != tokens.Ident { + p.error(p.s.Loc(), "expected ident") + return nil + } + q, 
prev := p.presentParser() + term := q.parseTerm() + if term != nil { + switch v := term.Value.(type) { + case Var: + imp.Path = RefTerm(term).SetLocation(term.Location) + case Ref: + for i := 1; i < len(v); i++ { + if _, ok := v[i].Value.(String); !ok { + p.errorf(v[i].Location, "unexpected %v token: expecting string", ValueName(v[i].Value)) + return nil + } + } + imp.Path = term + } + } + // keep advanced parser state, reset known keywords + p.s = q.s + p.s.s = q.s.s.WithKeywords(prev) + + if imp.Path == nil { + p.error(p.s.Loc(), "expected path") + return nil + } + + path := imp.Path.Value.(Ref) + + switch { + case RootDocumentNames.Contains(path[0]): + case FutureRootDocument.Equal(path[0]): + case RegoRootDocument.Equal(path[0]): + default: + p.hint("if this is unexpected, try updating OPA") + p.errorf(imp.Path.Location, "unexpected import path, must begin with one of: %v, got: %v", + RootDocumentNames.Union(NewSet(FutureRootDocument, RegoRootDocument)), + path[0]) + return nil + } + + if p.s.tok == tokens.As { + p.scan() + + if p.s.tok != tokens.Ident { + p.illegal("expected var") + return nil + } + + if alias := p.parseTerm(); alias != nil { + v, ok := alias.Value.(Var) + if ok { + imp.Alias = v + return &imp + } + } + p.illegal("expected var") + return nil + } + + return &imp +} + +func (p *Parser) parseRules() []*Rule { + + var rule Rule + rule.SetLoc(p.s.Loc()) + + if p.s.tok == tokens.Default { + p.scan() + rule.Default = true + } + + if p.s.tok != tokens.Ident { + return nil + } + + usesContains := false + if rule.Head, usesContains = p.parseHead(rule.Default); rule.Head == nil { + return nil + } + + if usesContains { + rule.Head.keywords = append(rule.Head.keywords, tokens.Contains) + } + + if rule.Default { + if !p.validateDefaultRuleValue(&rule) { + return nil + } + + if len(rule.Head.Args) > 0 { + if !p.validateDefaultRuleArgs(&rule) { + return nil + } + } + + rule.Body = NewBody(NewExpr(BooleanTerm(true).SetLocation(rule.Location)).SetLocation(rule.Location)) + return []*Rule{&rule} + } + + // back-compat with `p[x] { ... }`` + hasIf := p.s.tok == tokens.If + + // p[x] if ... becomes a single-value rule p[x] + if hasIf && !usesContains && len(rule.Head.Ref()) == 2 { + v := rule.Head.Ref()[1] + _, isRef := v.Value.(Ref) + if (!v.IsGround() || isRef) && len(rule.Head.Args) == 0 { + rule.Head.Key = rule.Head.Ref()[1] + } + + if rule.Head.Value == nil { + rule.Head.generatedValue = true + rule.Head.Value = BooleanTerm(true).SetLocation(rule.Head.Location) + } else { + // p[x] = y if becomes a single-value rule p[x] with value y, but needs name for compat + v, ok := rule.Head.Ref()[0].Value.(Var) + if !ok { + return nil + } + rule.Head.Name = v + } + } + + // p[x] becomes a multi-value rule p + if !hasIf && !usesContains && + len(rule.Head.Args) == 0 && // not a function + len(rule.Head.Ref()) == 2 { // ref like 'p[x]' + v, ok := rule.Head.Ref()[0].Value.(Var) + if !ok { + return nil + } + rule.Head.Name = v + rule.Head.Key = rule.Head.Ref()[1] + if rule.Head.Value == nil { + rule.Head.SetRef(rule.Head.Ref()[:len(rule.Head.Ref())-1]) + } + } + + switch { + case hasIf: + rule.Head.keywords = append(rule.Head.keywords, tokens.If) + p.scan() + s := p.save() + if expr := p.parseLiteral(); expr != nil { + // NOTE(sr): set literals are never false or undefined, so parsing this as + // p if { true } + // ^^^^^^^^ set of one element, `true` + // isn't valid. 
+ isSetLiteral := false + if t, ok := expr.Terms.(*Term); ok { + _, isSetLiteral = t.Value.(Set) + } + // expr.Term is []*Term or Every + if !isSetLiteral { + rule.Body.Append(expr) + break + } + } + + // parsing as literal didn't work out, expect '{ BODY }' + p.restore(s) + fallthrough + + case p.s.tok == tokens.LBrace: + p.scan() + if rule.Body = p.parseBody(tokens.RBrace); rule.Body == nil { + return nil + } + p.scan() + + case usesContains: + rule.Body = NewBody(NewExpr(BooleanTerm(true).SetLocation(rule.Location)).SetLocation(rule.Location)) + rule.generatedBody = true + rule.Location = rule.Head.Location + + return []*Rule{&rule} + + default: + return nil + } + + if p.s.tok == tokens.Else { + if r := rule.Head.Ref(); len(r) > 1 && !r.IsGround() { + p.error(p.s.Loc(), "else keyword cannot be used on rules with variables in head") + return nil + } + if rule.Head.Key != nil { + p.error(p.s.Loc(), "else keyword cannot be used on multi-value rules") + return nil + } + + if rule.Else = p.parseElse(rule.Head); rule.Else == nil { + return nil + } + } + + rule.Location.Text = p.s.Text(rule.Location.Offset, p.s.lastEnd) + + rules := []*Rule{&rule} + + for p.s.tok == tokens.LBrace { + + if rule.Else != nil { + p.error(p.s.Loc(), "expected else keyword") + return nil + } + + loc := p.s.Loc() + + p.scan() + var next Rule + + if next.Body = p.parseBody(tokens.RBrace); next.Body == nil { + return nil + } + p.scan() + + loc.Text = p.s.Text(loc.Offset, p.s.lastEnd) + next.SetLoc(loc) + + // Chained rule head's keep the original + // rule's head AST but have their location + // set to the rule body. + next.Head = rule.Head.Copy() + next.Head.keywords = rule.Head.keywords + for i := range next.Head.Args { + if v, ok := next.Head.Args[i].Value.(Var); ok && v.IsWildcard() { + next.Head.Args[i].Value = Var(p.genwildcard()) + } + } + setLocRecursive(next.Head, loc) + + rules = append(rules, &next) + } + + return rules +} + +func (p *Parser) parseElse(head *Head) *Rule { + + var rule Rule + rule.SetLoc(p.s.Loc()) + + rule.Head = head.Copy() + rule.Head.generatedValue = false + for i := range rule.Head.Args { + if v, ok := rule.Head.Args[i].Value.(Var); ok && v.IsWildcard() { + rule.Head.Args[i].Value = Var(p.genwildcard()) + } + } + rule.Head.SetLoc(p.s.Loc()) + + defer func() { + rule.Location.Text = p.s.Text(rule.Location.Offset, p.s.lastEnd) + }() + + p.scan() + + switch p.s.tok { + case tokens.LBrace, tokens.If: // no value, but a body follows directly + rule.Head.generatedValue = true + rule.Head.Value = BooleanTerm(true) + case tokens.Assign, tokens.Unify: + rule.Head.Assign = tokens.Assign == p.s.tok + p.scan() + rule.Head.Value = p.parseTermInfixCall() + if rule.Head.Value == nil { + return nil + } + rule.Head.Location.Text = p.s.Text(rule.Head.Location.Offset, p.s.lastEnd) + default: + p.illegal("expected else value term or rule body") + return nil + } + + hasIf := p.s.tok == tokens.If + hasLBrace := p.s.tok == tokens.LBrace + + if !hasIf && !hasLBrace { + rule.Body = NewBody(NewExpr(BooleanTerm(true))) + rule.generatedBody = true + setLocRecursive(rule.Body, rule.Location) + return &rule + } + + if hasIf { + rule.Head.keywords = append(rule.Head.keywords, tokens.If) + p.scan() + } + + if p.s.tok == tokens.LBrace { + p.scan() + if rule.Body = p.parseBody(tokens.RBrace); rule.Body == nil { + return nil + } + p.scan() + } else if p.s.tok != tokens.EOF { + expr := p.parseLiteral() + if expr == nil { + return nil + } + rule.Body.Append(expr) + setLocRecursive(rule.Body, rule.Location) + } else { + 
p.illegal("rule body expected") + return nil + } + + if p.s.tok == tokens.Else { + if rule.Else = p.parseElse(head); rule.Else == nil { + return nil + } + } + return &rule +} + +func (p *Parser) parseHead(defaultRule bool) (*Head, bool) { + head := &Head{} + loc := p.s.Loc() + defer func() { + if head != nil { + head.SetLoc(loc) + head.Location.Text = p.s.Text(head.Location.Offset, p.s.lastEnd) + } + }() + + term := p.parseVar() + if term == nil { + return nil, false + } + + ref := p.parseTermFinish(term, true) + if ref == nil { + p.illegal("expected rule head name") + return nil, false + } + + switch x := ref.Value.(type) { + case Var: + // TODO + head = VarHead(x, ref.Location, nil) + case Ref: + head = RefHead(x) + case Call: + op, args := x[0], x[1:] + var ref Ref + switch y := op.Value.(type) { + case Var: + ref = Ref{op} + case Ref: + if _, ok := y[0].Value.(Var); !ok { + p.illegal("rule head ref %v invalid", y) + return nil, false + } + ref = y + } + head = RefHead(ref) + head.Args = append([]*Term{}, args...) + + default: + return nil, false + } + + name := head.Ref().String() + + switch p.s.tok { + case tokens.Contains: // NOTE: no Value for `contains` heads, we return here + // Catch error case of using 'contains' with a function definition rule head. + if head.Args != nil { + p.illegal("the contains keyword can only be used with multi-value rule definitions (e.g., %s contains { ... })", name) + } + p.scan() + head.Key = p.parseTermInfixCall() + if head.Key == nil { + p.illegal("expected rule key term (e.g., %s contains { ... })", name) + } + return head, true + + case tokens.Unify: + p.scan() + head.Value = p.parseTermInfixCall() + if head.Value == nil { + // FIX HEAD.String() + p.illegal("expected rule value term (e.g., %s[%s] = { ... })", name, head.Key) + } + case tokens.Assign: + p.scan() + head.Assign = true + head.Value = p.parseTermInfixCall() + if head.Value == nil { + switch { + case len(head.Args) > 0: + p.illegal("expected function value term (e.g., %s(...) := { ... })", name) + case head.Key != nil: + p.illegal("expected partial rule value term (e.g., %s[...] := { ... })", name) + case defaultRule: + p.illegal("expected default rule value term (e.g., default %s := )", name) + default: + p.illegal("expected rule value term (e.g., %s := { ... 
})", name) + } + } + } + + if head.Value == nil && head.Key == nil { + if len(head.Ref()) != 2 || len(head.Args) > 0 { + head.generatedValue = true + head.Value = BooleanTerm(true).SetLocation(head.Location) + } + } + return head, false +} + +func (p *Parser) parseBody(end tokens.Token) Body { + return p.parseQuery(false, end) +} + +func (p *Parser) parseQuery(requireSemi bool, end tokens.Token) Body { + body := Body{} + + if p.s.tok == end { + p.error(p.s.Loc(), "found empty body") + return nil + } + + for { + expr := p.parseLiteral() + if expr == nil { + return nil + } + + body.Append(expr) + + if p.s.tok == tokens.Semicolon { + p.scan() + continue + } + + if p.s.tok == end || requireSemi { + return body + } + + if !p.s.skippedNL { + // If there was already an error then don't pile this one on + if len(p.s.errors) == 0 { + p.illegal(`expected \n or %s or %s`, tokens.Semicolon, end) + } + return nil + } + } +} + +func (p *Parser) parseLiteral() (expr *Expr) { + + offset := p.s.loc.Offset + loc := p.s.Loc() + + defer func() { + if expr != nil { + loc.Text = p.s.Text(offset, p.s.lastEnd) + expr.SetLoc(loc) + } + }() + + var negated bool + if p.s.tok == tokens.Not { + p.scan() + negated = true + } + + switch p.s.tok { + case tokens.Some: + if negated { + p.illegal("illegal negation of 'some'") + return nil + } + return p.parseSome() + case tokens.Every: + if negated { + p.illegal("illegal negation of 'every'") + return nil + } + return p.parseEvery() + default: + s := p.save() + expr := p.parseExpr() + if expr != nil { + expr.Negated = negated + if p.s.tok == tokens.With { + if expr.With = p.parseWith(); expr.With == nil { + return nil + } + } + // If we find a plain `every` identifier, attempt to parse an every expression, + // add hint if it succeeds. + if term, ok := expr.Terms.(*Term); ok && Var("every").Equal(term.Value) { + var hint bool + t := p.save() + p.restore(s) + if expr := p.futureParser().parseEvery(); expr != nil { + _, hint = expr.Terms.(*Every) + } + p.restore(t) + if hint { + p.hint("`import future.keywords.every` for `every x in xs { ... 
}` expressions") + } + } + return expr + } + return nil + } +} + +func (p *Parser) parseWith() []*With { + + withs := []*With{} + + for { + + with := With{ + Location: p.s.Loc(), + } + p.scan() + + if p.s.tok != tokens.Ident { + p.illegal("expected ident") + return nil + } + + with.Target = p.parseTerm() + if with.Target == nil { + return nil + } + + switch with.Target.Value.(type) { + case Ref, Var: + break + default: + p.illegal("expected with target path") + } + + if p.s.tok != tokens.As { + p.illegal("expected as keyword") + return nil + } + + p.scan() + + if with.Value = p.parseTermInfixCall(); with.Value == nil { + return nil + } + + with.Location.Text = p.s.Text(with.Location.Offset, p.s.lastEnd) + + withs = append(withs, &with) + + if p.s.tok != tokens.With { + break + } + } + + return withs +} + +func (p *Parser) parseSome() *Expr { + + decl := &SomeDecl{} + decl.SetLoc(p.s.Loc()) + + // Attempt to parse "some x in xs", which will end up in + // SomeDecl{Symbols: ["member(x, xs)"]} + s := p.save() + p.scan() + if term := p.parseTermInfixCall(); term != nil { + if call, ok := term.Value.(Call); ok { + switch call[0].String() { + case Member.Name: + if len(call) != 3 { + p.illegal("illegal domain") + return nil + } + case MemberWithKey.Name: + if len(call) != 4 { + p.illegal("illegal domain") + return nil + } + default: + p.illegal("expected `x in xs` or `x, y in xs` expression") + return nil + } + + decl.Symbols = []*Term{term} + expr := NewExpr(decl).SetLocation(decl.Location) + if p.s.tok == tokens.With { + if expr.With = p.parseWith(); expr.With == nil { + return nil + } + } + return expr + } + } + + p.restore(s) + s = p.save() // new copy for later + var hint bool + p.scan() + if term := p.futureParser().parseTermInfixCall(); term != nil { + if call, ok := term.Value.(Call); ok { + switch call[0].String() { + case Member.Name, MemberWithKey.Name: + hint = true + } + } + } + + // go on as before, it's `some x[...]` or illegal + p.restore(s) + if hint { + p.hint("`import future.keywords.in` for `some x in xs` expressions") + } + + for { // collecting var args + + p.scan() + + if p.s.tok != tokens.Ident { + p.illegal("expected var") + return nil + } + + decl.Symbols = append(decl.Symbols, p.parseVar()) + + p.scan() + + if p.s.tok != tokens.Comma { + break + } + } + + return NewExpr(decl).SetLocation(decl.Location) +} + +func (p *Parser) parseEvery() *Expr { + qb := &Every{} + qb.SetLoc(p.s.Loc()) + + // TODO(sr): We'd get more accurate error messages if we didn't rely on + // parseTermInfixCall here, but parsed "var [, var] in term" manually. + p.scan() + term := p.parseTermInfixCall() + if term == nil { + return nil + } + call, ok := term.Value.(Call) + if !ok { + p.illegal("expected `x[, y] in xs { ... }` expression") + return nil + } + switch call[0].String() { + case Member.Name: // x in xs + if len(call) != 3 { + p.illegal("illegal domain") + return nil + } + qb.Value = call[1] + qb.Domain = call[2] + case MemberWithKey.Name: // k, v in xs + if len(call) != 4 { + p.illegal("illegal domain") + return nil + } + qb.Key = call[1] + qb.Value = call[2] + qb.Domain = call[3] + if _, ok := qb.Key.Value.(Var); !ok { + p.illegal("expected key to be a variable") + return nil + } + default: + p.illegal("expected `x[, y] in xs { ... }` expression") + return nil + } + if _, ok := qb.Value.Value.(Var); !ok { + p.illegal("expected value to be a variable") + return nil + } + if p.s.tok == tokens.LBrace { // every x in xs { ... 
}
+		p.scan()
+		body := p.parseBody(tokens.RBrace)
+		if body == nil {
+			return nil
+		}
+		p.scan()
+		qb.Body = body
+		expr := NewExpr(qb).SetLocation(qb.Location)
+
+		if p.s.tok == tokens.With {
+			if expr.With = p.parseWith(); expr.With == nil {
+				return nil
+			}
+		}
+		return expr
+	}
+
+	p.illegal("missing body")
+	return nil
+}
+
+func (p *Parser) parseExpr() *Expr {
+
+	lhs := p.parseTermInfixCall()
+	if lhs == nil {
+		return nil
+	}
+
+	if op := p.parseTermOp(tokens.Assign, tokens.Unify); op != nil {
+		if rhs := p.parseTermInfixCall(); rhs != nil {
+			return NewExpr([]*Term{op, lhs, rhs})
+		}
+		return nil
+	}
+
+	// NOTE(tsandall): the top-level call term is converted to an expr because
+	// the evaluator does not support the call term type (nested calls are
+	// rewritten by the compiler.)
+	if call, ok := lhs.Value.(Call); ok {
+		return NewExpr([]*Term(call))
+	}
+
+	return NewExpr(lhs)
+}
+
+// parseTermInfixCall consumes the next term from the input and returns it. If a
+// term cannot be parsed, the return value is nil and an error will be recorded.
+// The scanner will be advanced to the next token before returning.
+// By starting out with infix relations (==, !=, <, etc.) and then calling the
+// other binary operators (|, &, arithmetic), it establishes the binding
+// precedence.
+func (p *Parser) parseTermInfixCall() *Term {
+	return p.parseTermIn(nil, true, p.s.loc.Offset)
+}
+
+func (p *Parser) parseTermInfixCallInList() *Term {
+	return p.parseTermIn(nil, false, p.s.loc.Offset)
+}
+
+// use static references to avoid allocations, and
+// copy them to the call term only when needed
+var memberWithKeyRef = MemberWithKey.Ref()
+var memberRef = Member.Ref()
+
+func (p *Parser) parseTermIn(lhs *Term, keyVal bool, offset int) *Term {
+	// NOTE(sr): `in` is a bit special: besides `lhs in rhs`, it also
+	// supports `key, val in rhs`, so it can have an optional second lhs.
+	// `keyVal` triggers if we attempt to parse a second lhs argument (`mhs`).
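+	// For example, `x in xs` parses to the call internal.member_2(x, xs)
+	// (the Member builtin), and `k, v in xs` to internal.member_3(k, v, xs)
+	// (MemberWithKey). Chained membership such as `x in xs in ys` associates
+	// to the left, i.e. internal.member_2(internal.member_2(x, xs), ys).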
+ if lhs == nil { + lhs = p.parseTermRelation(nil, offset) + } + if lhs != nil { + if keyVal && p.s.tok == tokens.Comma { // second "lhs", or "middle hand side" + s := p.save() + p.scan() + if mhs := p.parseTermRelation(nil, offset); mhs != nil { + + if op := p.parseTermOpName(memberWithKeyRef, tokens.In); op != nil { + if rhs := p.parseTermRelation(nil, p.s.loc.Offset); rhs != nil { + call := p.setLoc(CallTerm(op, lhs, mhs, rhs), lhs.Location, offset, p.s.lastEnd) + switch p.s.tok { + case tokens.In: + return p.parseTermIn(call, keyVal, offset) + default: + return call + } + } + } + } + p.restore(s) + } + if op := p.parseTermOpName(memberRef, tokens.In); op != nil { + if rhs := p.parseTermRelation(nil, p.s.loc.Offset); rhs != nil { + call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) + switch p.s.tok { + case tokens.In: + return p.parseTermIn(call, keyVal, offset) + default: + return call + } + } + } + } + return lhs +} + +func (p *Parser) parseTermRelation(lhs *Term, offset int) *Term { + if lhs == nil { + lhs = p.parseTermOr(nil, offset) + } + if lhs != nil { + if op := p.parseTermOp(tokens.Equal, tokens.Neq, tokens.Lt, tokens.Gt, tokens.Lte, tokens.Gte); op != nil { + if rhs := p.parseTermOr(nil, p.s.loc.Offset); rhs != nil { + call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) + switch p.s.tok { + case tokens.Equal, tokens.Neq, tokens.Lt, tokens.Gt, tokens.Lte, tokens.Gte: + return p.parseTermRelation(call, offset) + default: + return call + } + } + } + } + return lhs +} + +func (p *Parser) parseTermOr(lhs *Term, offset int) *Term { + if lhs == nil { + lhs = p.parseTermAnd(nil, offset) + } + if lhs != nil { + if op := p.parseTermOp(tokens.Or); op != nil { + if rhs := p.parseTermAnd(nil, p.s.loc.Offset); rhs != nil { + call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) + switch p.s.tok { + case tokens.Or: + return p.parseTermOr(call, offset) + default: + return call + } + } + } + return lhs + } + return nil +} + +func (p *Parser) parseTermAnd(lhs *Term, offset int) *Term { + if lhs == nil { + lhs = p.parseTermArith(nil, offset) + } + if lhs != nil { + if op := p.parseTermOp(tokens.And); op != nil { + if rhs := p.parseTermArith(nil, p.s.loc.Offset); rhs != nil { + call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) + switch p.s.tok { + case tokens.And: + return p.parseTermAnd(call, offset) + default: + return call + } + } + } + return lhs + } + return nil +} + +func (p *Parser) parseTermArith(lhs *Term, offset int) *Term { + if lhs == nil { + lhs = p.parseTermFactor(nil, offset) + } + if lhs != nil { + if op := p.parseTermOp(tokens.Add, tokens.Sub); op != nil { + if rhs := p.parseTermFactor(nil, p.s.loc.Offset); rhs != nil { + call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) + switch p.s.tok { + case tokens.Add, tokens.Sub: + return p.parseTermArith(call, offset) + default: + return call + } + } + } + } + return lhs +} + +func (p *Parser) parseTermFactor(lhs *Term, offset int) *Term { + if lhs == nil { + lhs = p.parseTerm() + } + if lhs != nil { + if op := p.parseTermOp(tokens.Mul, tokens.Quo, tokens.Rem); op != nil { + if rhs := p.parseTerm(); rhs != nil { + call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) + switch p.s.tok { + case tokens.Mul, tokens.Quo, tokens.Rem: + return p.parseTermFactor(call, offset) + default: + return call + } + } + } + } + return lhs +} + +func (p *Parser) parseTerm() *Term { + if term, s := 
p.parsedTermCacheLookup(); s != nil { + p.restore(s) + return term + } + s0 := p.save() + + var term *Term + switch p.s.tok { + case tokens.Null: + term = NullTerm().SetLocation(p.s.Loc()) + case tokens.True: + term = BooleanTerm(true).SetLocation(p.s.Loc()) + case tokens.False: + term = BooleanTerm(false).SetLocation(p.s.Loc()) + case tokens.Sub, tokens.Dot, tokens.Number: + term = p.parseNumber() + case tokens.String: + term = p.parseString() + case tokens.Ident, tokens.Contains: // NOTE(sr): contains anywhere BUT in rule heads gets no special treatment + term = p.parseVar() + case tokens.LBrack: + term = p.parseArray() + case tokens.LBrace: + term = p.parseSetOrObject() + case tokens.LParen: + offset := p.s.loc.Offset + p.scan() + if r := p.parseTermInfixCall(); r != nil { + if p.s.tok == tokens.RParen { + r.Location.Text = p.s.Text(offset, p.s.tokEnd) + term = r + } else { + p.error(p.s.Loc(), "non-terminated expression") + } + } + default: + p.illegalToken() + } + + term = p.parseTermFinish(term, false) + p.parsedTermCachePush(term, s0) + return term +} + +func (p *Parser) parseTermFinish(head *Term, skipws bool) *Term { + if head == nil { + return nil + } + offset := p.s.loc.Offset + p.doScan(skipws) + + switch p.s.tok { + case tokens.LParen, tokens.Dot, tokens.LBrack: + return p.parseRef(head, offset) + case tokens.Whitespace: + p.scan() + fallthrough + default: + if _, ok := head.Value.(Var); ok && RootDocumentNames.Contains(head) { + return RefTerm(head).SetLocation(head.Location) + } + return head + } +} + +func (p *Parser) parseNumber() *Term { + var prefix string + loc := p.s.Loc() + if p.s.tok == tokens.Sub { + prefix = "-" + p.scan() + switch p.s.tok { + case tokens.Number, tokens.Dot: + break + default: + p.illegal("expected number") + return nil + } + } + if p.s.tok == tokens.Dot { + prefix += "." + p.scan() + if p.s.tok != tokens.Number { + p.illegal("expected number") + return nil + } + } + + // Check for multiple leading 0's, parsed by math/big.Float.Parse as decimal 0: + // https://golang.org/pkg/math/big/#Float.Parse + if ((len(prefix) != 0 && prefix[0] == '-') || len(prefix) == 0) && + len(p.s.lit) > 1 && p.s.lit[0] == '0' && p.s.lit[1] == '0' { + p.illegal("expected number") + return nil + } + + // Ensure that the number is valid + s := prefix + p.s.lit + f, ok := new(big.Float).SetString(s) + if !ok { + p.illegal("invalid float") + return nil + } + + // Put limit on size of exponent to prevent non-linear cost of String() + // function on big.Float from causing denial of service: https://github.com/golang/go/issues/11068 + // + // n == sign * mantissa * 2^exp + // 0.5 <= mantissa < 1.0 + // + // The limit is arbitrary. + exp := f.MantExp(nil) + if exp > 1e5 || exp < -1e5 || f.IsInf() { // +/- inf, exp is 0 + p.error(p.s.Loc(), "number too big") + return nil + } + + // Note: Use the original string, do *not* round trip from + // the big.Float as it can cause precision loss. 
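+	// To illustrate the checks above (a sketch, not exhaustive):
+	//
+	//	"1", "-0.5", ".5", "1e3"  ->  accepted, kept verbatim as json.Number
+	//	"007"                     ->  rejected: multiple leading zeros
+	//	"1e1000000"               ->  rejected: exponent exceeds the limit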
+ return NumberTerm(json.Number(s)).SetLocation(loc) +} + +func (p *Parser) parseString() *Term { + if p.s.lit[0] == '"' { + if p.s.lit == "\"\"" { + return NewTerm(InternedEmptyString.Value).SetLocation(p.s.Loc()) + } + + var s string + err := json.Unmarshal([]byte(p.s.lit), &s) + if err != nil { + p.errorf(p.s.Loc(), "illegal string literal: %s", p.s.lit) + return nil + } + term := StringTerm(s).SetLocation(p.s.Loc()) + return term + } + return p.parseRawString() +} + +func (p *Parser) parseRawString() *Term { + if len(p.s.lit) < 2 { + return nil + } + term := StringTerm(p.s.lit[1 : len(p.s.lit)-1]).SetLocation(p.s.Loc()) + return term +} + +// this is the name to use for instantiating an empty set, e.g., `set()`. +var setConstructor = RefTerm(VarTerm("set")) + +func (p *Parser) parseCall(operator *Term, offset int) (term *Term) { + + loc := operator.Location + var end int + + defer func() { + p.setLoc(term, loc, offset, end) + }() + + p.scan() // steps over '(' + + if p.s.tok == tokens.RParen { // no args, i.e. set() or any.func() + end = p.s.tokEnd + p.scanWS() + if operator.Equal(setConstructor) { + return SetTerm() + } + return CallTerm(operator) + } + + if r := p.parseTermList(tokens.RParen, []*Term{operator}); r != nil { + end = p.s.tokEnd + p.scanWS() + return CallTerm(r...) + } + + return nil +} + +func (p *Parser) parseRef(head *Term, offset int) (term *Term) { + + loc := head.Location + var end int + + defer func() { + p.setLoc(term, loc, offset, end) + }() + + switch h := head.Value.(type) { + case Var, *Array, Object, Set, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Call: + // ok + default: + p.errorf(loc, "illegal ref (head cannot be %v)", ValueName(h)) + } + + ref := []*Term{head} + + for { + switch p.s.tok { + case tokens.Dot: + p.scanWS() + if p.s.tok != tokens.Ident { + p.illegal("expected %v", tokens.Ident) + return nil + } + ref = append(ref, StringTerm(p.s.lit).SetLocation(p.s.Loc())) + p.scanWS() + case tokens.LParen: + term = p.parseCall(p.setLoc(RefTerm(ref...), loc, offset, p.s.loc.Offset), offset) + if term != nil { + switch p.s.tok { + case tokens.Whitespace: + p.scan() + end = p.s.lastEnd + return term + case tokens.Dot, tokens.LBrack: + term = p.parseRef(term, offset) + } + } + end = p.s.tokEnd + return term + case tokens.LBrack: + p.scan() + if term := p.parseTermInfixCall(); term != nil { + if p.s.tok != tokens.RBrack { + p.illegal("expected %v", tokens.LBrack) + return nil + } + ref = append(ref, term) + p.scanWS() + } else { + return nil + } + case tokens.Whitespace: + end = p.s.lastEnd + p.scan() + return RefTerm(ref...) + default: + end = p.s.lastEnd + return RefTerm(ref...) + } + } +} + +func (p *Parser) parseArray() (term *Term) { + + loc := p.s.Loc() + offset := p.s.loc.Offset + + defer func() { + p.setLoc(term, loc, offset, p.s.tokEnd) + }() + + p.scan() + + if p.s.tok == tokens.RBrack { + return ArrayTerm() + } + + potentialComprehension := true + + // Skip leading commas, eg [, x, y] + // Supported for backwards compatibility. In the future + // we should make this a parse error. + if p.s.tok == tokens.Comma { + potentialComprehension = false + p.scan() + } + + s := p.save() + + // NOTE(tsandall): The parser cannot attempt a relational term here because + // of ambiguity around comprehensions. For example, given: + // + // {1 | 1} + // + // Does this represent a set comprehension or a set containing binary OR + // call? We resolve the ambiguity by prioritizing comprehensions. 
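+	// The same ambiguity exists for arrays: `[x | xs[x]]` is an array
+	// comprehension, so a one-element array holding a binary OR call has
+	// to be parenthesized, e.g. `[(s | t)]`.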
+	head := p.parseTerm()
+
+	if head == nil {
+		return nil
+	}
+
+	switch p.s.tok {
+	case tokens.RBrack:
+		return ArrayTerm(head)
+	case tokens.Comma:
+		p.scan()
+		if terms := p.parseTermList(tokens.RBrack, []*Term{head}); terms != nil {
+			return NewTerm(NewArray(terms...))
+		}
+		return nil
+	case tokens.Or:
+		if potentialComprehension {
+			// Try to parse as if it is an array comprehension
+			p.scan()
+			if body := p.parseBody(tokens.RBrack); body != nil {
+				return ArrayComprehensionTerm(head, body)
+			}
+			if p.s.tok != tokens.Comma {
+				return nil
+			}
+		}
+		// fall back to parsing as a normal array definition
+	}
+
+	p.restore(s)
+
+	if terms := p.parseTermList(tokens.RBrack, nil); terms != nil {
+		return NewTerm(NewArray(terms...))
+	}
+	return nil
+}
+
+func (p *Parser) parseSetOrObject() (term *Term) {
+	loc := p.s.Loc()
+	offset := p.s.loc.Offset
+
+	defer func() {
+		p.setLoc(term, loc, offset, p.s.tokEnd)
+	}()
+
+	p.scan()
+
+	if p.s.tok == tokens.RBrace {
+		return ObjectTerm()
+	}
+
+	potentialComprehension := true
+
+	// Skip leading commas, eg {, x, y}
+	// Supported for backwards compatibility. In the future
+	// we should make this a parse error.
+	if p.s.tok == tokens.Comma {
+		potentialComprehension = false
+		p.scan()
+	}
+
+	s := p.save()
+
+	// Try parsing just a single term first to give comprehensions higher
+	// priority than "or" calls in ambiguous situations. Eg: { a | b }
+	// will be a set comprehension.
+	//
+	// Note: We don't know yet if it is a set or object being defined.
+	head := p.parseTerm()
+	if head == nil {
+		return nil
+	}
+
+	switch p.s.tok {
+	case tokens.Or:
+		if potentialComprehension {
+			return p.parseSet(s, head, potentialComprehension)
+		}
+	case tokens.RBrace, tokens.Comma:
+		return p.parseSet(s, head, potentialComprehension)
+	case tokens.Colon:
+		return p.parseObject(head, potentialComprehension)
+	}
+
+	p.restore(s)
+
+	head = p.parseTermInfixCallInList()
+	if head == nil {
+		return nil
+	}
+
+	switch p.s.tok {
+	case tokens.RBrace, tokens.Comma:
+		return p.parseSet(s, head, false)
+	case tokens.Colon:
+		// It still might be an object comprehension, eg { a+1: b | ... }
+		return p.parseObject(head, potentialComprehension)
+	}
+
+	p.illegal("non-terminated set")
+	return nil
+}
+
+func (p *Parser) parseSet(s *state, head *Term, potentialComprehension bool) *Term {
+	switch p.s.tok {
+	case tokens.RBrace:
+		return SetTerm(head)
+	case tokens.Comma:
+		p.scan()
+		if terms := p.parseTermList(tokens.RBrace, []*Term{head}); terms != nil {
+			return SetTerm(terms...)
+		}
+	case tokens.Or:
+		if potentialComprehension {
+			// Try to parse as if it is a set comprehension
+			p.scan()
+			if body := p.parseBody(tokens.RBrace); body != nil {
+				return SetComprehensionTerm(head, body)
+			}
+			if p.s.tok != tokens.Comma {
+				return nil
+			}
+		}
+		// Fall back to parsing as normal set definition
+		p.restore(s)
+		if terms := p.parseTermList(tokens.RBrace, nil); terms != nil {
+			return SetTerm(terms...)
+		}
+	}
+	return nil
+}
+
+func (p *Parser) parseObject(k *Term, potentialComprehension bool) *Term {
+	// NOTE(tsandall): Assumption: this function is called after parsing the key
+	// of the head element and then receiving a colon token from the scanner.
+	// Advance beyond the colon and attempt to parse an object.
+	if p.s.tok != tokens.Colon {
+		panic("expected colon")
+	}
+	p.scan()
+
+	s := p.save()
+
+	// NOTE(sr): We first try to parse the value as a term (`v`), and see
+	// if we can parse `{ x: v | ...}` as a comprehension.
+	// However, if we encounter either a Comma or an RBrace, it cannot be
+	// parsed as a comprehension -- so we save double work further down
+	// where `parseObjectFinish(k, v, false)` would only exercise the
+	// same code paths once more.
+	v := p.parseTerm()
+	if v == nil {
+		return nil
+	}
+
+	potentialRelation := true
+	if potentialComprehension {
+		switch p.s.tok {
+		case tokens.RBrace, tokens.Comma:
+			potentialRelation = false
+			fallthrough
+		case tokens.Or:
+			if term := p.parseObjectFinish(k, v, true); term != nil {
+				return term
+			}
+		}
+	}
+
+	p.restore(s)
+
+	if potentialRelation {
+		v := p.parseTermInfixCallInList()
+		if v == nil {
+			return nil
+		}
+
+		switch p.s.tok {
+		case tokens.RBrace, tokens.Comma:
+			return p.parseObjectFinish(k, v, false)
+		}
+	}
+
+	p.illegal("non-terminated object")
+	return nil
+}
+
+func (p *Parser) parseObjectFinish(key, val *Term, potentialComprehension bool) *Term {
+	switch p.s.tok {
+	case tokens.RBrace:
+		return ObjectTerm([2]*Term{key, val})
+	case tokens.Or:
+		if potentialComprehension {
+			p.scan()
+			if body := p.parseBody(tokens.RBrace); body != nil {
+				return ObjectComprehensionTerm(key, val, body)
+			}
+		} else {
+			p.illegal("non-terminated object")
+		}
+	case tokens.Comma:
+		p.scan()
+		if r := p.parseTermPairList(tokens.RBrace, [][2]*Term{{key, val}}); r != nil {
+			return ObjectTerm(r...)
+		}
+	}
+	return nil
+}
+
+func (p *Parser) parseTermList(end tokens.Token, r []*Term) []*Term {
+	if p.s.tok == end {
+		return r
+	}
+	for {
+		term := p.parseTermInfixCallInList()
+		if term != nil {
+			r = append(r, term)
+			switch p.s.tok {
+			case end:
+				return r
+			case tokens.Comma:
+				p.scan()
+				if p.s.tok == end {
+					return r
+				}
+				continue
+			default:
+				p.illegal(fmt.Sprintf("expected %q or %q", tokens.Comma, end))
+				return nil
+			}
+		}
+		return nil
+	}
+}
+
+func (p *Parser) parseTermPairList(end tokens.Token, r [][2]*Term) [][2]*Term {
+	if p.s.tok == end {
+		return r
+	}
+	for {
+		key := p.parseTermInfixCallInList()
+		if key != nil {
+			switch p.s.tok {
+			case tokens.Colon:
+				p.scan()
+				if val := p.parseTermInfixCallInList(); val != nil {
+					r = append(r, [2]*Term{key, val})
+					switch p.s.tok {
+					case end:
+						return r
+					case tokens.Comma:
+						p.scan()
+						if p.s.tok == end {
+							return r
+						}
+						continue
+					default:
+						p.illegal(fmt.Sprintf("expected %q or %q", tokens.Comma, end))
+						return nil
+					}
+				}
+			default:
+				p.illegal(fmt.Sprintf("expected %q", tokens.Colon))
+				return nil
+			}
+		}
+		return nil
+	}
+}
+
+func (p *Parser) parseTermOp(values ...tokens.Token) *Term {
+	for i := range values {
+		if p.s.tok == values[i] {
+			r := RefTerm(VarTerm(p.s.tok.String()).SetLocation(p.s.Loc())).SetLocation(p.s.Loc())
+			p.scan()
+			return r
+		}
+	}
+	return nil
+}
+
+func (p *Parser) parseTermOpName(ref Ref, values ...tokens.Token) *Term {
+	for i := range values {
+		if p.s.tok == values[i] {
+			cp := ref.Copy()
+			for _, r := range cp {
+				r.SetLocation(p.s.Loc())
+			}
+			t := RefTerm(cp...)
+ t.SetLocation(p.s.Loc()) + p.scan() + return t + } + } + return nil +} + +func (p *Parser) parseVar() *Term { + + s := p.s.lit + + term := VarTerm(s).SetLocation(p.s.Loc()) + + // Update wildcard values with unique identifiers + if term.Equal(Wildcard) { + term.Value = Var(p.genwildcard()) + } + + return term +} + +func (p *Parser) genwildcard() string { + c := p.s.wildcard + p.s.wildcard++ + return fmt.Sprintf("%v%d", WildcardPrefix, c) +} + +func (p *Parser) error(loc *location.Location, reason string) { + p.errorf(loc, reason) //nolint:govet +} + +func (p *Parser) errorf(loc *location.Location, f string, a ...interface{}) { + msg := strings.Builder{} + msg.WriteString(fmt.Sprintf(f, a...)) + + switch len(p.s.hints) { + case 0: // nothing to do + case 1: + msg.WriteString(" (hint: ") + msg.WriteString(p.s.hints[0]) + msg.WriteRune(')') + default: + msg.WriteString(" (hints: ") + for i, h := range p.s.hints { + if i > 0 { + msg.WriteString(", ") + } + msg.WriteString(h) + } + msg.WriteRune(')') + } + + p.s.errors = append(p.s.errors, &Error{ + Code: ParseErr, + Message: msg.String(), + Location: loc, + Details: newParserErrorDetail(p.s.s.Bytes(), loc.Offset), + }) + p.s.hints = nil +} + +func (p *Parser) hint(f string, a ...interface{}) { + p.s.hints = append(p.s.hints, fmt.Sprintf(f, a...)) +} + +func (p *Parser) illegal(note string, a ...interface{}) { + tok := p.s.tok.String() + + if p.s.tok == tokens.Illegal { + p.errorf(p.s.Loc(), "illegal token") + return + } + + tokType := "token" + if tokens.IsKeyword(p.s.tok) { + tokType = "keyword" + } else if _, ok := allFutureKeywords[p.s.tok.String()]; ok { + tokType = "keyword" + } + + note = fmt.Sprintf(note, a...) + if len(note) > 0 { + p.errorf(p.s.Loc(), "unexpected %s %s: %s", tok, tokType, note) + } else { + p.errorf(p.s.Loc(), "unexpected %s %s", tok, tokType) + } +} + +func (p *Parser) illegalToken() { + p.illegal("") +} + +func (p *Parser) scan() { + p.doScan(true) +} + +func (p *Parser) scanWS() { + p.doScan(false) +} + +func (p *Parser) doScan(skipws bool) { + + // NOTE(tsandall): the last position is used to compute the "text" field for + // complex AST nodes. Whitespace never affects the last position of an AST + // node so do not update it when scanning. + if p.s.tok != tokens.Whitespace { + p.s.lastEnd = p.s.tokEnd + p.s.skippedNL = false + } + + var errs []scanner.Error + for { + var pos scanner.Position + p.s.tok, pos, p.s.lit, errs = p.s.s.Scan() + + p.s.tokEnd = pos.End + p.s.loc.Row = pos.Row + p.s.loc.Col = pos.Col + p.s.loc.Offset = pos.Offset + p.s.loc.Text = p.s.Text(pos.Offset, pos.End) + p.s.loc.Tabs = pos.Tabs + + for _, err := range errs { + p.error(p.s.Loc(), err.Message) + } + + if len(errs) > 0 { + p.s.tok = tokens.Illegal + } + + if p.s.tok == tokens.Whitespace { + if p.s.lit == "\n" { + p.s.skippedNL = true + } + if skipws { + continue + } + } + + if p.s.tok != tokens.Comment { + break + } + + // For backwards compatibility leave a nil + // Text value if there is no text rather than + // an empty string. 
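+	// For example, scanning `# note` yields a Comment whose Text is
+	// []byte(" note") (the leading space is kept), while a bare `#`
+	// leaves Text nil.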
+	var commentText []byte
+	if len(p.s.lit) > 1 {
+		commentText = []byte(p.s.lit[1:])
+	}
+	comment := NewComment(commentText)
+	comment.SetLoc(p.s.Loc())
+	p.s.comments = append(p.s.comments, comment)
+	}
+}
+
+func (p *Parser) save() *state {
+	cpy := *p.s
+	s := *cpy.s
+	cpy.s = &s
+	return &cpy
+}
+
+func (p *Parser) restore(s *state) {
+	p.s = s
+}
+
+func setLocRecursive(x interface{}, loc *location.Location) {
+	NewGenericVisitor(func(x interface{}) bool {
+		if node, ok := x.(Node); ok {
+			node.SetLoc(loc)
+		}
+		return false
+	}).Walk(x)
+}
+
+func (p *Parser) setLoc(term *Term, loc *location.Location, offset, end int) *Term {
+	if term != nil {
+		cpy := *loc
+		term.Location = &cpy
+		term.Location.Text = p.s.Text(offset, end)
+	}
+	return term
+}
+
+func (p *Parser) validateDefaultRuleValue(rule *Rule) bool {
+	if rule.Head.Value == nil {
+		p.error(rule.Loc(), "illegal default rule (must have a value)")
+		return false
+	}
+
+	valid := true
+	vis := NewGenericVisitor(func(x interface{}) bool {
+		switch x.(type) {
+		case *ArrayComprehension, *ObjectComprehension, *SetComprehension: // skip closures
+			return true
+		case Ref, Var, Call:
+			p.error(rule.Loc(), fmt.Sprintf("illegal default rule (value cannot contain %v)", TypeName(x)))
+			valid = false
+			return true
+		}
+		return false
+	})
+
+	vis.Walk(rule.Head.Value.Value)
+	return valid
+}
+
+func (p *Parser) validateDefaultRuleArgs(rule *Rule) bool {
+
+	valid := true
+	vars := NewVarSet()
+
+	vis := NewGenericVisitor(func(x interface{}) bool {
+		switch x := x.(type) {
+		case Var:
+			if vars.Contains(x) {
+				p.error(rule.Loc(), fmt.Sprintf("illegal default rule (arguments cannot be repeated %v)", x))
+				valid = false
+				return true
+			}
+			vars.Add(x)
+
+		case *Term:
+			switch v := x.Value.(type) {
+			case Var: // do nothing
+			default:
+				p.error(rule.Loc(), fmt.Sprintf("illegal default rule (arguments cannot contain %v)", ValueName(v)))
+				valid = false
+				return true
+			}
+		}
+
+		return false
+	})
+
+	vis.Walk(rule.Head.Args)
+	return valid
+}
+
+// We explicitly use yaml unmarshalling to accommodate the '_' in
+// 'related_resources', which the json unmarshaller does not handle properly.
+type rawAnnotation struct {
+	Scope            string                 `yaml:"scope"`
+	Title            string                 `yaml:"title"`
+	Entrypoint       bool                   `yaml:"entrypoint"`
+	Description      string                 `yaml:"description"`
+	Organizations    []string               `yaml:"organizations"`
+	RelatedResources []interface{}          `yaml:"related_resources"`
+	Authors          []interface{}          `yaml:"authors"`
+	Schemas          []map[string]any       `yaml:"schemas"`
+	Custom           map[string]interface{} `yaml:"custom"`
+}
+
+type metadataParser struct {
+	buf      *bytes.Buffer
+	comments []*Comment
+	loc      *location.Location
+}
+
+func newMetadataParser(loc *Location) *metadataParser {
+	return &metadataParser{loc: loc, buf: bytes.NewBuffer(nil)}
+}
+
+func (b *metadataParser) Append(c *Comment) {
+	b.buf.Write(bytes.TrimPrefix(c.Text, []byte(" ")))
+	b.buf.WriteByte('\n')
+	b.comments = append(b.comments, c)
+}
+
+var yamlLineErrRegex = regexp.MustCompile(`^yaml:(?: unmarshal errors:[\n\s]*)? 
line ([[:digit:]]+):`) + +func (b *metadataParser) Parse() (*Annotations, error) { + + var raw rawAnnotation + + if len(bytes.TrimSpace(b.buf.Bytes())) == 0 { + return nil, errors.New("expected METADATA block, found whitespace") + } + + if err := yaml.Unmarshal(b.buf.Bytes(), &raw); err != nil { + var comment *Comment + match := yamlLineErrRegex.FindStringSubmatch(err.Error()) + if len(match) == 2 { + index, err2 := strconv.Atoi(match[1]) + if err2 == nil { + if index >= len(b.comments) { + comment = b.comments[len(b.comments)-1] + } else { + comment = b.comments[index] + } + b.loc = comment.Location + } + } + + if match == nil && len(b.comments) > 0 { + b.loc = b.comments[0].Location + } + + return nil, augmentYamlError(err, b.comments) + } + + var result Annotations + result.comments = b.comments + result.Scope = raw.Scope + result.Entrypoint = raw.Entrypoint + result.Title = raw.Title + result.Description = raw.Description + result.Organizations = raw.Organizations + + for _, v := range raw.RelatedResources { + rr, err := parseRelatedResource(v) + if err != nil { + return nil, fmt.Errorf("invalid related-resource definition %s: %w", v, err) + } + result.RelatedResources = append(result.RelatedResources, rr) + } + + for _, pair := range raw.Schemas { + k, v := unwrapPair(pair) + + var a SchemaAnnotation + var err error + + a.Path, err = ParseRef(k) + if err != nil { + return nil, errors.New("invalid document reference") + } + + switch v := v.(type) { + case string: + a.Schema, err = parseSchemaRef(v) + if err != nil { + return nil, err + } + case map[string]any: + w, err := convertYAMLMapKeyTypes(v, nil) + if err != nil { + return nil, fmt.Errorf("invalid schema definition: %w", err) + } + a.Definition = &w + default: + return nil, fmt.Errorf("invalid schema declaration for path %q", k) + } + + result.Schemas = append(result.Schemas, &a) + } + + for _, v := range raw.Authors { + author, err := parseAuthor(v) + if err != nil { + return nil, fmt.Errorf("invalid author definition %s: %w", v, err) + } + result.Authors = append(result.Authors, author) + } + + result.Custom = make(map[string]interface{}) + for k, v := range raw.Custom { + val, err := convertYAMLMapKeyTypes(v, nil) + if err != nil { + return nil, err + } + result.Custom[k] = val + } + + result.Location = b.loc + + // recreate original text of entire metadata block for location text attribute + sb := strings.Builder{} + sb.WriteString("# METADATA\n") + + lines := bytes.Split(b.buf.Bytes(), []byte{'\n'}) + + for _, line := range lines[:len(lines)-1] { + sb.WriteString("# ") + sb.Write(line) + sb.WriteByte('\n') + } + + result.Location.Text = []byte(strings.TrimSuffix(sb.String(), "\n")) + + return &result, nil +} + +// augmentYamlError augments a YAML error with hints intended to help the user figure out the cause of an otherwise +// cryptic error. These are hints, instead of proper errors, because they are educated guesses, and aren't guaranteed +// to be correct. 
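+// For example, a METADATA line such as `# title:My rule` (no space after
+// the ':') typically fails to unmarshal; the added hint then points at the
+// offending rune and the comment's row.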
+func augmentYamlError(err error, comments []*Comment) error {
+	// Adding hints for when key/value ':' separator isn't suffixed with a legal YAML space symbol
+	for _, comment := range comments {
+		txt := string(comment.Text)
+		parts := strings.Split(txt, ":")
+		if len(parts) > 1 {
+			parts = parts[1:]
+			var invalidSpaces []string
+			for partIndex, part := range parts {
+				if len(part) == 0 && partIndex == len(parts)-1 {
+					invalidSpaces = []string{}
+					break
+				}
+
+				r, _ := utf8.DecodeRuneInString(part)
+				if r == ' ' || r == '\t' {
+					invalidSpaces = []string{}
+					break
+				}
+
+				invalidSpaces = append(invalidSpaces, fmt.Sprintf("%+q", r))
+			}
+			if len(invalidSpaces) > 0 {
+				err = fmt.Errorf(
+					"%s\n Hint: on line %d, symbol(s) %v immediately following a key/value separator ':' is not a legal yaml space character",
+					err.Error(), comment.Location.Row, invalidSpaces)
+			}
+		}
+	}
+	return err
+}
+
+func unwrapPair(pair map[string]interface{}) (string, interface{}) {
+	for k, v := range pair {
+		return k, v
+	}
+	return "", nil
+}
+
+var errInvalidSchemaRef = errors.New("invalid schema reference")
+
+// NOTE(tsandall): 'schema' is not registered as a root because it's not
+// supported by the compiler or evaluator today. Once we fix that, we can remove
+// this function.
+func parseSchemaRef(s string) (Ref, error) {
+
+	term, err := ParseTerm(s)
+	if err == nil {
+		switch v := term.Value.(type) {
+		case Var:
+			if term.Equal(SchemaRootDocument) {
+				return SchemaRootRef.Copy(), nil
+			}
+		case Ref:
+			if v.HasPrefix(SchemaRootRef) {
+				return v, nil
+			}
+		}
+	}
+
+	return nil, errInvalidSchemaRef
+}
+
+func parseRelatedResource(rr interface{}) (*RelatedResourceAnnotation, error) {
+	rr, err := convertYAMLMapKeyTypes(rr, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	switch rr := rr.(type) {
+	case string:
+		if len(rr) > 0 {
+			u, err := url.Parse(rr)
+			if err != nil {
+				return nil, err
+			}
+			return &RelatedResourceAnnotation{Ref: *u}, nil
+		}
+		return nil, errors.New("ref URL may not be empty string")
+	case map[string]interface{}:
+		description := strings.TrimSpace(getSafeString(rr, "description"))
+		ref := strings.TrimSpace(getSafeString(rr, "ref"))
+		if len(ref) > 0 {
+			u, err := url.Parse(ref)
+			if err != nil {
+				return nil, err
+			}
+			return &RelatedResourceAnnotation{Description: description, Ref: *u}, nil
+		}
+		return nil, errors.New("'ref' value required in object")
+	}
+
+	return nil, errors.New("invalid value type, must be string or map")
+}
+
+func parseAuthor(a interface{}) (*AuthorAnnotation, error) {
+	a, err := convertYAMLMapKeyTypes(a, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	switch a := a.(type) {
+	case string:
+		return parseAuthorString(a)
+	case map[string]interface{}:
+		name := strings.TrimSpace(getSafeString(a, "name"))
+		email := strings.TrimSpace(getSafeString(a, "email"))
+		if len(name) > 0 || len(email) > 0 {
+			return &AuthorAnnotation{name, email}, nil
+		}
+		return nil, errors.New("'name' and/or 'email' values required in object")
+	}
+
+	return nil, errors.New("invalid value type, must be string or map")
+}
+
+func getSafeString(m map[string]interface{}, k string) string {
+	if v, found := m[k]; found {
+		if s, ok := v.(string); ok {
+			return s
+		}
+	}
+	return ""
+}
+
+const emailPrefix = "<"
+const emailSuffix = ">"
+
+// parseAuthorString parses a string into an AuthorAnnotation. If the last word of the input string is enclosed within <>,
+// it is extracted as the author's email. The email must not contain whitespace, as it would then be interpreted as
+// multiple words.
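+//
+// For example (illustrative values):
+//
+//	"Jane Doe <jane@example.org>"  ->  Name "Jane Doe", Email "jane@example.org"
+//	"Jane Doe"                     ->  Name "Jane Doe", Email ""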
+func parseAuthorString(s string) (*AuthorAnnotation, error) { + parts := strings.Fields(s) + + if len(parts) == 0 { + return nil, errors.New("author is an empty string") + } + + namePartCount := len(parts) + trailing := parts[namePartCount-1] + var email string + if len(trailing) >= len(emailPrefix)+len(emailSuffix) && strings.HasPrefix(trailing, emailPrefix) && + strings.HasSuffix(trailing, emailSuffix) { + email = trailing[len(emailPrefix):] + email = email[0 : len(email)-len(emailSuffix)] + namePartCount -= 1 + } + + name := strings.Join(parts[0:namePartCount], " ") + + return &AuthorAnnotation{Name: name, Email: email}, nil +} + +func convertYAMLMapKeyTypes(x any, path []string) (any, error) { + var err error + switch x := x.(type) { + case map[any]any: + result := make(map[string]any, len(x)) + for k, v := range x { + str, ok := k.(string) + if !ok { + return nil, fmt.Errorf("invalid map key type(s): %v", strings.Join(path, "/")) + } + result[str], err = convertYAMLMapKeyTypes(v, append(path, str)) + if err != nil { + return nil, err + } + } + return result, nil + case []any: + for i := range x { + x[i], err = convertYAMLMapKeyTypes(x[i], append(path, strconv.Itoa(i))) + if err != nil { + return nil, err + } + } + return x, nil + default: + return x, nil + } +} + +// futureKeywords is the source of truth for future keywords that will +// eventually become standard keywords inside of Rego. +var futureKeywords = map[string]tokens.Token{} + +// futureKeywordsV0 is the source of truth for future keywords that were +// not yet a standard part of Rego in v0, and required importing. +var futureKeywordsV0 = map[string]tokens.Token{ + "in": tokens.In, + "every": tokens.Every, + "contains": tokens.Contains, + "if": tokens.If, +} + +var allFutureKeywords map[string]tokens.Token + +func IsFutureKeyword(s string) bool { + return IsFutureKeywordForRegoVersion(s, RegoV1) +} + +func IsFutureKeywordForRegoVersion(s string, v RegoVersion) bool { + var yes bool + + switch v { + case RegoV0, RegoV0CompatV1: + _, yes = futureKeywordsV0[s] + case RegoV1: + _, yes = futureKeywords[s] + } + + return yes +} + +func (p *Parser) futureImport(imp *Import, allowedFutureKeywords map[string]tokens.Token) { + path := imp.Path.Value.(Ref) + + if len(path) == 1 || !path[1].Equal(keywordsTerm) { + p.errorf(imp.Path.Location, "invalid import, must be `future.keywords`") + return + } + + if imp.Alias != "" { + p.errorf(imp.Path.Location, "`future` imports cannot be aliased") + return + } + + kwds := make([]string, 0, len(allowedFutureKeywords)) + for k := range allowedFutureKeywords { + kwds = append(kwds, k) + } + + switch len(path) { + case 2: // all keywords imported, nothing to do + case 3: // one keyword imported + kw, ok := path[2].Value.(String) + if !ok { + p.errorf(imp.Path.Location, "invalid import, must be `future.keywords.x`, e.g. 
`import future.keywords.in`") + return + } + keyword := string(kw) + _, ok = allowedFutureKeywords[keyword] + if !ok { + sort.Strings(kwds) // so the error message is stable + p.errorf(imp.Path.Location, "unexpected keyword, must be one of %v", kwds) + return + } + + kwds = []string{keyword} // overwrite + } + for _, kw := range kwds { + p.s.s.AddKeyword(kw, allowedFutureKeywords[kw]) + } +} + +func (p *Parser) regoV1Import(imp *Import) { + if !p.po.Capabilities.ContainsFeature(FeatureRegoV1Import) && !p.po.Capabilities.ContainsFeature(FeatureRegoV1) { + p.errorf(imp.Path.Location, "invalid import, `%s` is not supported by current capabilities", RegoV1CompatibleRef) + return + } + + path := imp.Path.Value.(Ref) + + // v1 is only valid option + if len(path) == 1 || !path[1].Equal(RegoV1CompatibleRef[1]) || len(path) > 2 { + p.errorf(imp.Path.Location, "invalid import `%s`, must be `%s`", path, RegoV1CompatibleRef) + return + } + + if p.po.EffectiveRegoVersion() == RegoV1 { + // We're parsing for Rego v1, where the 'rego.v1' import is a no-op. + return + } + + if imp.Alias != "" { + p.errorf(imp.Path.Location, "`rego` imports cannot be aliased") + return + } + + // import all future keywords with the rego.v1 import + kwds := make([]string, 0, len(futureKeywordsV0)) + for k := range futureKeywordsV0 { + kwds = append(kwds, k) + } + + p.s.s.SetRegoV1Compatible() + for _, kw := range kwds { + p.s.s.AddKeyword(kw, futureKeywordsV0[kw]) + } +} + +func init() { + allFutureKeywords = map[string]tokens.Token{} + for k, v := range futureKeywords { + allFutureKeywords[k] = v + } + for k, v := range futureKeywordsV0 { + allFutureKeywords[k] = v + } +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/parser_ext.go b/vendor/github.com/open-policy-agent/opa/v1/ast/parser_ext.go new file mode 100644 index 00000000000..dec06f19692 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/parser_ext.go @@ -0,0 +1,821 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// This file contains extra functions for parsing Rego. +// Most of the parsing is handled by the code in parser.go, +// however, there are additional utilities that are +// helpful for dealing with Rego source inputs (e.g., REPL +// statements, source files, etc.) + +package ast + +import ( + "bytes" + "errors" + "fmt" + "strings" + "unicode" + + "github.com/open-policy-agent/opa/v1/ast/internal/tokens" +) + +// MustParseBody returns a parsed body. +// If an error occurs during parsing, panic. +func MustParseBody(input string) Body { + return MustParseBodyWithOpts(input, ParserOptions{}) +} + +// MustParseBodyWithOpts returns a parsed body. +// If an error occurs during parsing, panic. +func MustParseBodyWithOpts(input string, opts ParserOptions) Body { + parsed, err := ParseBodyWithOpts(input, opts) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseExpr returns a parsed expression. +// If an error occurs during parsing, panic. +func MustParseExpr(input string) *Expr { + parsed, err := ParseExpr(input) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseImports returns a slice of imports. +// If an error occurs during parsing, panic. +func MustParseImports(input string) []*Import { + parsed, err := ParseImports(input) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseModule returns a parsed module. +// If an error occurs during parsing, panic. 
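+//
+// A minimal usage sketch (the module text is hypothetical):
+//
+//	m := MustParseModule("package example\n\npi := 3.14")
+//	fmt.Println(m.Package.Path) // data.example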
+func MustParseModule(input string) *Module { + return MustParseModuleWithOpts(input, ParserOptions{}) +} + +// MustParseModuleWithOpts returns a parsed module. +// If an error occurs during parsing, panic. +func MustParseModuleWithOpts(input string, opts ParserOptions) *Module { + parsed, err := ParseModuleWithOpts("", input, opts) + if err != nil { + panic(err) + } + return parsed +} + +// MustParsePackage returns a Package. +// If an error occurs during parsing, panic. +func MustParsePackage(input string) *Package { + parsed, err := ParsePackage(input) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseStatements returns a slice of parsed statements. +// If an error occurs during parsing, panic. +func MustParseStatements(input string) []Statement { + parsed, _, err := ParseStatements("", input) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseStatement returns exactly one statement. +// If an error occurs during parsing, panic. +func MustParseStatement(input string) Statement { + parsed, err := ParseStatement(input) + if err != nil { + panic(err) + } + return parsed +} + +func MustParseStatementWithOpts(input string, popts ParserOptions) Statement { + parsed, err := ParseStatementWithOpts(input, popts) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseRef returns a parsed reference. +// If an error occurs during parsing, panic. +func MustParseRef(input string) Ref { + parsed, err := ParseRef(input) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseRule returns a parsed rule. +// If an error occurs during parsing, panic. +func MustParseRule(input string) *Rule { + parsed, err := ParseRule(input) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseRuleWithOpts returns a parsed rule. +// If an error occurs during parsing, panic. +func MustParseRuleWithOpts(input string, opts ParserOptions) *Rule { + parsed, err := ParseRuleWithOpts(input, opts) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseTerm returns a parsed term. +// If an error occurs during parsing, panic. +func MustParseTerm(input string) *Term { + parsed, err := ParseTerm(input) + if err != nil { + panic(err) + } + return parsed +} + +// ParseRuleFromBody returns a rule if the body can be interpreted as a rule +// definition. Otherwise, an error is returned. +func ParseRuleFromBody(module *Module, body Body) (*Rule, error) { + + if len(body) != 1 { + return nil, errors.New("multiple expressions cannot be used for rule head") + } + + return ParseRuleFromExpr(module, body[0]) +} + +// ParseRuleFromExpr returns a rule if the expression can be interpreted as a +// rule definition. 
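+//
+// For example, the equality expression `p = 1` can be reinterpreted as the
+// rule `p = 1 { true }`, and a ground ref such as `p.q.r` as the complete
+// document rule `p.q.r = true { true }`.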
+func ParseRuleFromExpr(module *Module, expr *Expr) (*Rule, error) { + + if len(expr.With) > 0 { + return nil, errors.New("expressions using with keyword cannot be used for rule head") + } + + if expr.Negated { + return nil, errors.New("negated expressions cannot be used for rule head") + } + + if _, ok := expr.Terms.(*SomeDecl); ok { + return nil, errors.New("'some' declarations cannot be used for rule head") + } + + if term, ok := expr.Terms.(*Term); ok { + switch v := term.Value.(type) { + case Ref: + if len(v) > 2 { // 2+ dots + return ParseCompleteDocRuleWithDotsFromTerm(module, term) + } + return ParsePartialSetDocRuleFromTerm(module, term) + default: + return nil, fmt.Errorf("%v cannot be used for rule name", ValueName(v)) + } + } + + if _, ok := expr.Terms.([]*Term); !ok { + // This is a defensive check in case other kinds of expression terms are + // introduced in the future. + return nil, errors.New("expression cannot be used for rule head") + } + + if expr.IsEquality() { + return parseCompleteRuleFromEq(module, expr) + } else if expr.IsAssignment() { + rule, err := parseCompleteRuleFromEq(module, expr) + if err != nil { + return nil, err + } + rule.Head.Assign = true + return rule, nil + } + + if _, ok := BuiltinMap[expr.Operator().String()]; ok { + return nil, errors.New("rule name conflicts with built-in function") + } + + return ParseRuleFromCallExpr(module, expr.Terms.([]*Term)) +} + +func parseCompleteRuleFromEq(module *Module, expr *Expr) (rule *Rule, err error) { + + // ensure the rule location is set to the expr location + // the helper functions called below try to set the location based + // on the terms they've been provided but that is not as accurate. + defer func() { + if rule != nil { + rule.Location = expr.Location + rule.Head.Location = expr.Location + } + }() + + lhs, rhs := expr.Operand(0), expr.Operand(1) + if lhs == nil || rhs == nil { + return nil, errors.New("assignment requires two operands") + } + + rule, err = ParseRuleFromCallEqExpr(module, lhs, rhs) + if err == nil { + return rule, nil + } + + rule, err = ParsePartialObjectDocRuleFromEqExpr(module, lhs, rhs) + if err == nil { + return rule, nil + } + + return ParseCompleteDocRuleFromEqExpr(module, lhs, rhs) +} + +// ParseCompleteDocRuleFromAssignmentExpr returns a rule if the expression can +// be interpreted as a complete document definition declared with the assignment +// operator. +func ParseCompleteDocRuleFromAssignmentExpr(module *Module, lhs, rhs *Term) (*Rule, error) { + + rule, err := ParseCompleteDocRuleFromEqExpr(module, lhs, rhs) + if err != nil { + return nil, err + } + + rule.Head.Assign = true + + return rule, nil +} + +// ParseCompleteDocRuleFromEqExpr returns a rule if the expression can be +// interpreted as a complete document definition. +func ParseCompleteDocRuleFromEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) { + var head *Head + + if v, ok := lhs.Value.(Var); ok { + // Modify the code to add the location to the head ref + head = VarHead(v, lhs.Location, nil) + } else if r, ok := lhs.Value.(Ref); ok { // groundness ? 
+ if _, ok := r[0].Value.(Var); !ok { + return nil, fmt.Errorf("invalid rule head: %v", r) + } + head = RefHead(r) + if len(r) > 1 && !r[len(r)-1].IsGround() { + return nil, errors.New("ref not ground") + } + } else { + return nil, fmt.Errorf("%v cannot be used for rule name", ValueName(lhs.Value)) + } + head.Value = rhs + head.Location = lhs.Location + + body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location)) + + return &Rule{ + Location: lhs.Location, + Head: head, + Body: body, + Module: module, + generatedBody: true, + }, nil +} + +func ParseCompleteDocRuleWithDotsFromTerm(module *Module, term *Term) (*Rule, error) { + ref, ok := term.Value.(Ref) + if !ok { + return nil, fmt.Errorf("%v cannot be used for rule name", ValueName(term.Value)) + } + + if _, ok := ref[0].Value.(Var); !ok { + return nil, fmt.Errorf("invalid rule head: %v", ref) + } + head := RefHead(ref, BooleanTerm(true).SetLocation(term.Location)) + head.generatedValue = true + head.Location = term.Location + + body := NewBody(NewExpr(BooleanTerm(true).SetLocation(term.Location)).SetLocation(term.Location)) + + return &Rule{ + Location: term.Location, + Head: head, + Body: body, + Module: module, + }, nil +} + +// ParsePartialObjectDocRuleFromEqExpr returns a rule if the expression can be +// interpreted as a partial object document definition. +func ParsePartialObjectDocRuleFromEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) { + ref, ok := lhs.Value.(Ref) + if !ok { + return nil, fmt.Errorf("%v cannot be used as rule name", ValueName(lhs.Value)) + } + + if _, ok := ref[0].Value.(Var); !ok { + return nil, fmt.Errorf("invalid rule head: %v", ref) + } + + head := RefHead(ref, rhs) + if len(ref) == 2 { // backcompat for naked `foo.bar = "baz"` statements + head.Name = ref[0].Value.(Var) + head.Key = ref[1] + } + head.Location = rhs.Location + + body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location)) + + rule := &Rule{ + Location: rhs.Location, + Head: head, + Body: body, + Module: module, + } + + return rule, nil +} + +// ParsePartialSetDocRuleFromTerm returns a rule if the term can be interpreted +// as a partial set document definition. +func ParsePartialSetDocRuleFromTerm(module *Module, term *Term) (*Rule, error) { + + ref, ok := term.Value.(Ref) + if !ok || len(ref) == 1 { + return nil, fmt.Errorf("%vs cannot be used for rule head", ValueName(term.Value)) + } + if _, ok := ref[0].Value.(Var); !ok { + return nil, fmt.Errorf("invalid rule head: %v", ref) + } + + head := RefHead(ref) + if len(ref) == 2 { + v, ok := ref[0].Value.(Var) + if !ok { + return nil, fmt.Errorf("%vs cannot be used for rule head", ValueName(term.Value)) + } + // Modify the code to add the location to the head ref + head = VarHead(v, ref[0].Location, nil) + head.Key = ref[1] + } + head.Location = term.Location + + body := NewBody(NewExpr(BooleanTerm(true).SetLocation(term.Location)).SetLocation(term.Location)) + + rule := &Rule{ + Location: term.Location, + Head: head, + Body: body, + Module: module, + } + + return rule, nil +} + +// ParseRuleFromCallEqExpr returns a rule if the term can be interpreted as a +// function definition (e.g., f(x) = y => f(x) = y { true }). 
+func ParseRuleFromCallEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) { + + call, ok := lhs.Value.(Call) + if !ok { + return nil, errors.New("must be call") + } + + ref, ok := call[0].Value.(Ref) + if !ok { + return nil, fmt.Errorf("%vs cannot be used in function signature", ValueName(call[0].Value)) + } + if _, ok := ref[0].Value.(Var); !ok { + return nil, fmt.Errorf("invalid rule head: %v", ref) + } + + head := RefHead(ref, rhs) + head.Location = lhs.Location + head.Args = Args(call[1:]) + + body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location)) + + rule := &Rule{ + Location: lhs.Location, + Head: head, + Body: body, + Module: module, + } + + return rule, nil +} + +// ParseRuleFromCallExpr returns a rule if the terms can be interpreted as a +// function returning true or some value (e.g., f(x) => f(x) = true { true }). +func ParseRuleFromCallExpr(module *Module, terms []*Term) (*Rule, error) { + + if len(terms) <= 1 { + return nil, errors.New("rule argument list must take at least one argument") + } + + loc := terms[0].Location + ref := terms[0].Value.(Ref) + if _, ok := ref[0].Value.(Var); !ok { + return nil, fmt.Errorf("invalid rule head: %v", ref) + } + head := RefHead(ref, BooleanTerm(true).SetLocation(loc)) + head.Location = loc + head.Args = terms[1:] + + body := NewBody(NewExpr(BooleanTerm(true).SetLocation(loc)).SetLocation(loc)) + + rule := &Rule{ + Location: loc, + Head: head, + Module: module, + Body: body, + } + return rule, nil +} + +// ParseImports returns a slice of Import objects. +func ParseImports(input string) ([]*Import, error) { + stmts, _, err := ParseStatements("", input) + if err != nil { + return nil, err + } + result := []*Import{} + for _, stmt := range stmts { + if imp, ok := stmt.(*Import); ok { + result = append(result, imp) + } else { + return nil, fmt.Errorf("expected import but got %T", stmt) + } + } + return result, nil +} + +// ParseModule returns a parsed Module object. +// For details on Module objects and their fields, see policy.go. +// Empty input will return nil, nil. +func ParseModule(filename, input string) (*Module, error) { + return ParseModuleWithOpts(filename, input, ParserOptions{}) +} + +// ParseModuleWithOpts returns a parsed Module object, and has an additional input ParserOptions +// For details on Module objects and their fields, see policy.go. +// Empty input will return nil, nil. +func ParseModuleWithOpts(filename, input string, popts ParserOptions) (*Module, error) { + stmts, comments, err := ParseStatementsWithOpts(filename, input, popts) + if err != nil { + return nil, err + } + return parseModule(filename, stmts, comments, popts.RegoVersion) +} + +// ParseBody returns exactly one body. +// If multiple bodies are parsed, an error is returned. +func ParseBody(input string) (Body, error) { + return ParseBodyWithOpts(input, ParserOptions{SkipRules: true}) +} + +// ParseBodyWithOpts returns exactly one body. It does _not_ set SkipRules: true on its own, +// but respects whatever ParserOptions it's been given. 
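+//
+// A minimal usage sketch, assuming the default (v1) Rego version:
+//
+//	body, err := ParseBodyWithOpts("x := 1; x < 2", ParserOptions{SkipRules: true})
+//	// on success, body holds two expressions: the assignment and the comparison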
+func ParseBodyWithOpts(input string, popts ParserOptions) (Body, error) { + + stmts, _, err := ParseStatementsWithOpts("", input, popts) + if err != nil { + return nil, err + } + + result := Body{} + + for _, stmt := range stmts { + switch stmt := stmt.(type) { + case Body: + for i := range stmt { + result.Append(stmt[i]) + } + case *Comment: + // skip + default: + return nil, fmt.Errorf("expected body but got %T", stmt) + } + } + + return result, nil +} + +// ParseExpr returns exactly one expression. +// If multiple expressions are parsed, an error is returned. +func ParseExpr(input string) (*Expr, error) { + body, err := ParseBody(input) + if err != nil { + return nil, fmt.Errorf("failed to parse expression: %w", err) + } + if len(body) != 1 { + return nil, fmt.Errorf("expected exactly one expression but got: %v", body) + } + return body[0], nil +} + +// ParsePackage returns exactly one Package. +// If multiple statements are parsed, an error is returned. +func ParsePackage(input string) (*Package, error) { + stmt, err := ParseStatement(input) + if err != nil { + return nil, err + } + pkg, ok := stmt.(*Package) + if !ok { + return nil, fmt.Errorf("expected package but got %T", stmt) + } + return pkg, nil +} + +// ParseTerm returns exactly one term. +// If multiple terms are parsed, an error is returned. +func ParseTerm(input string) (*Term, error) { + body, err := ParseBody(input) + if err != nil { + return nil, fmt.Errorf("failed to parse term: %w", err) + } + if len(body) != 1 { + return nil, fmt.Errorf("expected exactly one term but got: %v", body) + } + term, ok := body[0].Terms.(*Term) + if !ok { + return nil, fmt.Errorf("expected term but got %v", body[0].Terms) + } + return term, nil +} + +// ParseRef returns exactly one reference. +func ParseRef(input string) (Ref, error) { + term, err := ParseTerm(input) + if err != nil { + return nil, fmt.Errorf("failed to parse ref: %w", err) + } + ref, ok := term.Value.(Ref) + if !ok { + return nil, fmt.Errorf("expected ref but got %v", term) + } + return ref, nil +} + +// ParseRuleWithOpts returns exactly one rule. +// If multiple rules are parsed, an error is returned. +func ParseRuleWithOpts(input string, opts ParserOptions) (*Rule, error) { + stmts, _, err := ParseStatementsWithOpts("", input, opts) + if err != nil { + return nil, err + } + if len(stmts) != 1 { + return nil, fmt.Errorf("expected exactly one statement (rule), got %v = %T, %T", stmts, stmts[0], stmts[1]) + } + rule, ok := stmts[0].(*Rule) + if !ok { + return nil, fmt.Errorf("expected rule but got %T", stmts[0]) + } + return rule, nil +} + +// ParseRule returns exactly one rule. +// If multiple rules are parsed, an error is returned. +func ParseRule(input string) (*Rule, error) { + return ParseRuleWithOpts(input, ParserOptions{}) +} + +// ParseStatement returns exactly one statement. +// A statement might be a term, expression, rule, etc. Regardless, +// this function expects *exactly* one statement. If multiple +// statements are parsed, an error is returned. 
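+//
+// For example, `package play` yields a *Package, `import data.foo` an
+// *Import, and `p := 7` a *Rule.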
+func ParseStatement(input string) (Statement, error) {
+	stmts, _, err := ParseStatements("", input)
+	if err != nil {
+		return nil, err
+	}
+	if len(stmts) != 1 {
+		return nil, errors.New("expected exactly one statement")
+	}
+	return stmts[0], nil
+}
+
+// ParseStatementWithOpts returns exactly one statement, parsed with the
+// supplied ParserOptions.
+func ParseStatementWithOpts(input string, popts ParserOptions) (Statement, error) {
+	stmts, _, err := ParseStatementsWithOpts("", input, popts)
+	if err != nil {
+		return nil, err
+	}
+	if len(stmts) != 1 {
+		return nil, errors.New("expected exactly one statement")
+	}
+	return stmts[0], nil
+}
+
+// ParseStatements is deprecated. Use ParseStatementsWithOpts instead.
+func ParseStatements(filename, input string) ([]Statement, []*Comment, error) {
+	return ParseStatementsWithOpts(filename, input, ParserOptions{})
+}
+
+// ParseStatementsWithOpts returns a slice of parsed statements. This is the
+// default return value from the parser.
+func ParseStatementsWithOpts(filename, input string, popts ParserOptions) ([]Statement, []*Comment, error) {
+
+	parser := NewParser().
+		WithFilename(filename).
+		WithReader(bytes.NewBufferString(input)).
+		WithProcessAnnotation(popts.ProcessAnnotation).
+		WithFutureKeywords(popts.FutureKeywords...).
+		WithAllFutureKeywords(popts.AllFutureKeywords).
+		WithCapabilities(popts.Capabilities).
+		WithSkipRules(popts.SkipRules).
+		WithRegoVersion(popts.RegoVersion).
+		withUnreleasedKeywords(popts.unreleasedKeywords)
+
+	stmts, comments, errs := parser.Parse()
+
+	if len(errs) > 0 {
+		return nil, nil, errs
+	}
+
+	return stmts, comments, nil
+}
+
+func parseModule(filename string, stmts []Statement, comments []*Comment, regoCompatibilityMode RegoVersion) (*Module, error) {
+
+	if len(stmts) == 0 {
+		return nil, NewError(ParseErr, &Location{File: filename}, "empty module")
+	}
+
+	var errs Errors
+
+	pkg, ok := stmts[0].(*Package)
+	if !ok {
+		loc := stmts[0].Loc()
+		errs = append(errs, NewError(ParseErr, loc, "package expected"))
+	}
+
+	mod := &Module{
+		Package: pkg,
+		stmts:   stmts,
+	}
+
+	// The comments slice only holds comments that were not their own statements.
+	mod.Comments = append(mod.Comments, comments...)
+
+	if regoCompatibilityMode == RegoUndefined {
+		mod.regoVersion = DefaultRegoVersion
+	} else {
+		mod.regoVersion = regoCompatibilityMode
+	}
+
+	for i, stmt := range stmts[1:] {
+		switch stmt := stmt.(type) {
+		case *Import:
+			mod.Imports = append(mod.Imports, stmt)
+			if mod.regoVersion == RegoV0 && Compare(stmt.Path.Value, RegoV1CompatibleRef) == 0 {
+				mod.regoVersion = RegoV0CompatV1
+			}
+		case *Rule:
+			setRuleModule(stmt, mod)
+			mod.Rules = append(mod.Rules, stmt)
+		case Body:
+			rule, err := ParseRuleFromBody(mod, stmt)
+			if err != nil {
+				errs = append(errs, NewError(ParseErr, stmt[0].Location, err.Error())) //nolint:govet
+				continue
+			}
+			rule.generatedBody = true
+			mod.Rules = append(mod.Rules, rule)
+
+			// NOTE(tsandall): the statement should now be interpreted as a
+			// rule so update the statement list. This is important for the
+			// logic below that associates annotations with statements.
+			stmts[i+1] = rule
+		case *Package:
+			errs = append(errs, NewError(ParseErr, stmt.Loc(), "unexpected package"))
+		case *Annotations:
+			mod.Annotations = append(mod.Annotations, stmt)
+		case *Comment:
+			// Ignore comments, they're handled above.
+		default:
+			panic("illegal value") // Indicates grammar is out-of-sync with code.
+ } + } + + if mod.regoVersion == RegoV0CompatV1 || mod.regoVersion == RegoV1 { + for _, rule := range mod.Rules { + for r := rule; r != nil; r = r.Else { + errs = append(errs, CheckRegoV1(r)...) + } + } + } + + if len(errs) > 0 { + return nil, errs + } + + errs = append(errs, attachAnnotationsNodes(mod)...) + + if len(errs) > 0 { + return nil, errs + } + + attachRuleAnnotations(mod) + + return mod, nil +} + +func ruleDeclarationHasKeyword(rule *Rule, keyword tokens.Token) bool { + for _, kw := range rule.Head.keywords { + if kw == keyword { + return true + } + } + return false +} + +func newScopeAttachmentErr(a *Annotations, want string) *Error { + var have string + if a.node != nil { + have = fmt.Sprintf(" (have %v)", TypeName(a.node)) + } + return NewError(ParseErr, a.Loc(), "annotation scope '%v' must be applied to %v%v", a.Scope, want, have) +} + +func setRuleModule(rule *Rule, module *Module) { + rule.Module = module + if rule.Else != nil { + setRuleModule(rule.Else, module) + } +} + +// ParserErrorDetail holds additional details for parser errors. +type ParserErrorDetail struct { + Line string `json:"line"` + Idx int `json:"idx"` +} + +func newParserErrorDetail(bs []byte, offset int) *ParserErrorDetail { + + // Find first non-space character at or before offset position. + if offset >= len(bs) { + offset = len(bs) - 1 + } else if offset < 0 { + offset = 0 + } + + for offset > 0 && unicode.IsSpace(rune(bs[offset])) { + offset-- + } + + // Find beginning of line containing offset. + begin := offset + + for begin > 0 && !isNewLineChar(bs[begin]) { + begin-- + } + + if isNewLineChar(bs[begin]) { + begin++ + } + + // Find end of line containing offset. + end := offset + + for end < len(bs) && !isNewLineChar(bs[end]) { + end++ + } + + if begin > end { + begin = end + } + + // Extract line and compute index of offset byte in line. + line := bs[begin:end] + index := offset - begin + + return &ParserErrorDetail{ + Line: string(line), + Idx: index, + } +} + +// Lines returns the pretty formatted line output for the error details. +func (d ParserErrorDetail) Lines() []string { + line := strings.TrimLeft(d.Line, "\t") // remove leading tabs + tabCount := len(d.Line) - len(line) + indent := d.Idx - tabCount + if indent < 0 { + indent = 0 + } + return []string{line, strings.Repeat(" ", indent) + "^"} +} + +func isNewLineChar(b byte) bool { + return b == '\r' || b == '\n' +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/policy.go b/vendor/github.com/open-policy-agent/opa/v1/ast/policy.go new file mode 100644 index 00000000000..978de9441b1 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/policy.go @@ -0,0 +1,2013 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "bytes" + "encoding/json" + "fmt" + "slices" + "strings" + + "github.com/open-policy-agent/opa/v1/ast/internal/tokens" + astJSON "github.com/open-policy-agent/opa/v1/ast/json" + "github.com/open-policy-agent/opa/v1/util" +) + +// DefaultRootDocument is the default root document. +// +// All package directives inside source files are implicitly prefixed with the +// DefaultRootDocument value. +var DefaultRootDocument = VarTerm("data") + +// InputRootDocument names the document containing query arguments. +var InputRootDocument = VarTerm("input") + +// SchemaRootDocument names the document containing external data schemas. 
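+//
+// As an aside (illustrative sketch, assuming the package's MustParseRef
+// helper): every non-local ref is rooted in one of these documents, e.g.
+//
+//	ref := MustParseRef("data.example.allow")
+//	ref[0].Equal(DefaultRootDocument) // true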
+var SchemaRootDocument = VarTerm("schema") + +// FunctionArgRootDocument names the document containing function arguments. +// It's only for internal usage, for referencing function arguments between +// the index and topdown. +var FunctionArgRootDocument = VarTerm("args") + +// FutureRootDocument names the document containing new, to-become-default, +// features. +var FutureRootDocument = VarTerm("future") + +// RegoRootDocument names the document containing new, to-become-default, +// features in a future versioned release. +var RegoRootDocument = VarTerm("rego") + +// RootDocumentNames contains the names of top-level documents that can be +// referred to in modules and queries. +// +// Note, the schema document is not currently implemented in the evaluator so it +// is not registered as a root document name (yet). +var RootDocumentNames = NewSet( + DefaultRootDocument, + InputRootDocument, +) + +// DefaultRootRef is a reference to the root of the default document. +// +// All refs to data in the policy engine's storage layer are prefixed with this ref. +var DefaultRootRef = Ref{DefaultRootDocument} + +// InputRootRef is a reference to the root of the input document. +// +// All refs to query arguments are prefixed with this ref. +var InputRootRef = Ref{InputRootDocument} + +// SchemaRootRef is a reference to the root of the schema document. +// +// All refs to schema documents are prefixed with this ref. Note, the schema +// document is not currently implemented in the evaluator so it is not +// registered as a root document ref (yet). +var SchemaRootRef = Ref{SchemaRootDocument} + +// RootDocumentRefs contains the prefixes of top-level documents that all +// non-local references start with. +var RootDocumentRefs = NewSet( + NewTerm(DefaultRootRef), + NewTerm(InputRootRef), +) + +// SystemDocumentKey is the name of the top-level key that identifies the system +// document. +const SystemDocumentKey = String("system") + +// ReservedVars is the set of names that refer to implicitly ground vars. +var ReservedVars = NewVarSet( + DefaultRootDocument.Value.(Var), + InputRootDocument.Value.(Var), +) + +// Wildcard represents the wildcard variable as defined in the language. +var Wildcard = &Term{Value: Var("_")} + +// WildcardPrefix is the special character that all wildcard variables are +// prefixed with when the statement they are contained in is parsed. +const WildcardPrefix = "$" + +// Keywords contains strings that map to language keywords. +var Keywords = KeywordsForRegoVersion(DefaultRegoVersion) + +var KeywordsV0 = [...]string{ + "not", + "package", + "import", + "as", + "default", + "else", + "with", + "null", + "true", + "false", + "some", +} + +var KeywordsV1 = [...]string{ + "not", + "package", + "import", + "as", + "default", + "else", + "with", + "null", + "true", + "false", + "some", + "if", + "contains", + "in", + "every", +} + +func KeywordsForRegoVersion(v RegoVersion) []string { + switch v { + case RegoV0: + return KeywordsV0[:] + case RegoV1, RegoV0CompatV1: + return KeywordsV1[:] + } + return nil +} + +// IsKeyword returns true if s is a language keyword. +func IsKeyword(s string) bool { + return IsInKeywords(s, Keywords) +} + +func IsInKeywords(s string, keywords []string) bool { + for _, x := range keywords { + if x == s { + return true + } + } + return false +} + +// IsKeywordInRegoVersion returns true if s is a language keyword. 
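+// For instance (sketch):
+//
+//	IsKeywordInRegoVersion("if", RegoV0) // false: "if" is not reserved in v0
+//	IsKeywordInRegoVersion("if", RegoV1) // true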
+func IsKeywordInRegoVersion(s string, regoVersion RegoVersion) bool { + switch regoVersion { + case RegoV0: + for _, x := range KeywordsV0 { + if x == s { + return true + } + } + case RegoV1, RegoV0CompatV1: + for _, x := range KeywordsV1 { + if x == s { + return true + } + } + } + + return false +} + +type ( + // Node represents a node in an AST. Nodes may be statements in a policy module + // or elements of an ad-hoc query, expression, etc. + Node interface { + fmt.Stringer + Loc() *Location + SetLoc(*Location) + } + + // Statement represents a single statement in a policy module. + Statement interface { + Node + } +) + +type ( + + // Module represents a collection of policies (defined by rules) + // within a namespace (defined by the package) and optional + // dependencies on external documents (defined by imports). + Module struct { + Package *Package `json:"package"` + Imports []*Import `json:"imports,omitempty"` + Annotations []*Annotations `json:"annotations,omitempty"` + Rules []*Rule `json:"rules,omitempty"` + Comments []*Comment `json:"comments,omitempty"` + stmts []Statement + regoVersion RegoVersion + } + + // Comment contains the raw text from the comment in the definition. + Comment struct { + // TODO: these fields have inconsistent JSON keys with other structs in this package. + Text []byte + Location *Location + } + + // Package represents the namespace of the documents produced + // by rules inside the module. + Package struct { + Path Ref `json:"path"` + Location *Location `json:"location,omitempty"` + } + + // Import represents a dependency on a document outside of the policy + // namespace. Imports are optional. + Import struct { + Path *Term `json:"path"` + Alias Var `json:"alias,omitempty"` + Location *Location `json:"location,omitempty"` + } + + // Rule represents a rule as defined in the language. Rules define the + // content of documents that represent policy decisions. + Rule struct { + Default bool `json:"default,omitempty"` + Head *Head `json:"head"` + Body Body `json:"body"` + Else *Rule `json:"else,omitempty"` + Location *Location `json:"location,omitempty"` + Annotations []*Annotations `json:"annotations,omitempty"` + + // Module is a pointer to the module containing this rule. If the rule + // was NOT created while parsing/constructing a module, this should be + // left unset. The pointer is not included in any standard operations + // on the rule (e.g., printing, comparison, visiting, etc.) + Module *Module `json:"-"` + + generatedBody bool + } + + // Head represents the head of a rule. + Head struct { + Name Var `json:"name,omitempty"` + Reference Ref `json:"ref,omitempty"` + Args Args `json:"args,omitempty"` + Key *Term `json:"key,omitempty"` + Value *Term `json:"value,omitempty"` + Assign bool `json:"assign,omitempty"` + Location *Location `json:"location,omitempty"` + + keywords []tokens.Token + generatedValue bool + } + + // Args represents zero or more arguments to a rule. + Args []*Term + + // Body represents one or more expressions contained inside a rule or user + // function. + Body []*Expr + + // Expr represents a single expression contained inside the body of a rule. + Expr struct { + With []*With `json:"with,omitempty"` + Terms interface{} `json:"terms"` + Index int `json:"index"` + Generated bool `json:"generated,omitempty"` + Negated bool `json:"negated,omitempty"` + Location *Location `json:"location,omitempty"` + + generatedFrom *Expr + generates []*Expr + } + + // SomeDecl represents a variable declaration statement. 
The symbols are variables. + SomeDecl struct { + Symbols []*Term `json:"symbols"` + Location *Location `json:"location,omitempty"` + } + + Every struct { + Key *Term `json:"key"` + Value *Term `json:"value"` + Domain *Term `json:"domain"` + Body Body `json:"body"` + Location *Location `json:"location,omitempty"` + } + + // With represents a modifier on an expression. + With struct { + Target *Term `json:"target"` + Value *Term `json:"value"` + Location *Location `json:"location,omitempty"` + } +) + +// SetModuleRegoVersion sets the RegoVersion for the Module. +func SetModuleRegoVersion(mod *Module, v RegoVersion) { + mod.regoVersion = v +} + +// Compare returns an integer indicating whether mod is less than, equal to, +// or greater than other. +func (mod *Module) Compare(other *Module) int { + if mod == nil { + if other == nil { + return 0 + } + return -1 + } else if other == nil { + return 1 + } + if cmp := mod.Package.Compare(other.Package); cmp != 0 { + return cmp + } + if cmp := importsCompare(mod.Imports, other.Imports); cmp != 0 { + return cmp + } + if cmp := annotationsCompare(mod.Annotations, other.Annotations); cmp != 0 { + return cmp + } + return rulesCompare(mod.Rules, other.Rules) +} + +// Copy returns a deep copy of mod. +func (mod *Module) Copy() *Module { + cpy := *mod + cpy.Rules = make([]*Rule, len(mod.Rules)) + + nodes := make(map[Node]Node, len(mod.Rules)+len(mod.Imports)+1 /* package */) + + for i := range mod.Rules { + cpy.Rules[i] = mod.Rules[i].Copy() + cpy.Rules[i].Module = &cpy + nodes[mod.Rules[i]] = cpy.Rules[i] + } + + cpy.Imports = make([]*Import, len(mod.Imports)) + for i := range mod.Imports { + cpy.Imports[i] = mod.Imports[i].Copy() + nodes[mod.Imports[i]] = cpy.Imports[i] + } + + cpy.Package = mod.Package.Copy() + nodes[mod.Package] = cpy.Package + + cpy.Annotations = make([]*Annotations, len(mod.Annotations)) + for i, a := range mod.Annotations { + cpy.Annotations[i] = a.Copy(nodes[a.node]) + } + + cpy.Comments = make([]*Comment, len(mod.Comments)) + for i := range mod.Comments { + cpy.Comments[i] = mod.Comments[i].Copy() + } + + cpy.stmts = make([]Statement, len(mod.stmts)) + for i := range mod.stmts { + cpy.stmts[i] = nodes[mod.stmts[i]] + } + + return &cpy +} + +// Equal returns true if mod equals other. +func (mod *Module) Equal(other *Module) bool { + return mod.Compare(other) == 0 +} + +func (mod *Module) String() string { + byNode := map[Node][]*Annotations{} + for _, a := range mod.Annotations { + byNode[a.node] = append(byNode[a.node], a) + } + + appendAnnotationStrings := func(buf []string, node Node) []string { + if as, ok := byNode[node]; ok { + for i := range as { + buf = append(buf, "# METADATA") + buf = append(buf, "# "+as[i].String()) + } + } + return buf + } + + buf := []string{} + buf = appendAnnotationStrings(buf, mod.Package) + buf = append(buf, mod.Package.String()) + + if len(mod.Imports) > 0 { + buf = append(buf, "") + for _, imp := range mod.Imports { + buf = appendAnnotationStrings(buf, imp) + buf = append(buf, imp.String()) + } + } + if len(mod.Rules) > 0 { + buf = append(buf, "") + for _, rule := range mod.Rules { + buf = appendAnnotationStrings(buf, rule) + buf = append(buf, rule.stringWithOpts(toStringOpts{regoVersion: mod.regoVersion})) + } + } + return strings.Join(buf, "\n") +} + +// RuleSet returns a RuleSet containing named rules in the mod. 
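+//
+// A usage sketch (illustrative; mod is assumed to be a parsed module):
+//
+//	for _, r := range mod.RuleSet(Var("allow")) {
+//		fmt.Println(r.Ref()) // each rule in mod whose head is named "allow"
+//	}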
+func (mod *Module) RuleSet(name Var) RuleSet {
+	rs := NewRuleSet()
+	for _, rule := range mod.Rules {
+		if rule.Head.Name.Equal(name) {
+			rs.Add(rule)
+		}
+	}
+	return rs
+}
+
+// UnmarshalJSON parses bs and stores the result in mod. The rules in the module
+// will have their module pointer set to mod.
+func (mod *Module) UnmarshalJSON(bs []byte) error {
+
+	// Declare a new type and use a type conversion to avoid recursively calling
+	// Module#UnmarshalJSON.
+	type module Module
+
+	if err := util.UnmarshalJSON(bs, (*module)(mod)); err != nil {
+		return err
+	}
+
+	WalkRules(mod, func(rule *Rule) bool {
+		rule.Module = mod
+		return false
+	})
+
+	return nil
+}
+
+func (mod *Module) regoV1Compatible() bool {
+	return mod.regoVersion == RegoV1 || mod.regoVersion == RegoV0CompatV1
+}
+
+func (mod *Module) RegoVersion() RegoVersion {
+	return mod.regoVersion
+}
+
+// SetRegoVersion sets the RegoVersion for the module.
+// Note: Setting a rego-version that does not match the module's rego-version might have unintended consequences.
+func (mod *Module) SetRegoVersion(v RegoVersion) {
+	mod.regoVersion = v
+}
+
+// NewComment returns a new Comment object.
+func NewComment(text []byte) *Comment {
+	return &Comment{
+		Text: text,
+	}
+}
+
+// Loc returns the location of the comment in the definition.
+func (c *Comment) Loc() *Location {
+	if c == nil {
+		return nil
+	}
+	return c.Location
+}
+
+// SetLoc sets the location on c.
+func (c *Comment) SetLoc(loc *Location) {
+	c.Location = loc
+}
+
+func (c *Comment) String() string {
+	return "#" + string(c.Text)
+}
+
+// Copy returns a deep copy of c.
+func (c *Comment) Copy() *Comment {
+	cpy := *c
+	cpy.Text = make([]byte, len(c.Text))
+	copy(cpy.Text, c.Text)
+	return &cpy
+}
+
+// Equal returns true if this comment equals the other comment.
+// Unlike other equality checks on AST nodes, comment equality
+// depends on location.
+func (c *Comment) Equal(other *Comment) bool {
+	return c.Location.Equal(other.Location) && bytes.Equal(c.Text, other.Text)
+}
+
+// Compare returns an integer indicating whether pkg is less than, equal to,
+// or greater than other.
+func (pkg *Package) Compare(other *Package) int {
+	return termSliceCompare(pkg.Path, other.Path)
+}
+
+// Copy returns a deep copy of pkg.
+func (pkg *Package) Copy() *Package {
+	cpy := *pkg
+	cpy.Path = pkg.Path.Copy()
+	return &cpy
+}
+
+// Equal returns true if pkg is equal to other.
+func (pkg *Package) Equal(other *Package) bool {
+	return pkg.Compare(other) == 0
+}
+
+// Loc returns the location of the Package in the definition.
+func (pkg *Package) Loc() *Location {
+	if pkg == nil {
+		return nil
+	}
+	return pkg.Location
+}
+
+// SetLoc sets the location on pkg.
+func (pkg *Package) SetLoc(loc *Location) {
+	pkg.Location = loc
+}
+
+func (pkg *Package) String() string {
+	if pkg == nil {
+		return "<illegal nil package>"
+	} else if len(pkg.Path) <= 1 {
+		return fmt.Sprintf("package <illegal path %q>", pkg.Path)
+	}
+	// Omit head as all packages have the DefaultRootDocument prepended at parse time.
+ path := make(Ref, len(pkg.Path)-1) + path[0] = VarTerm(string(pkg.Path[1].Value.(String))) + copy(path[1:], pkg.Path[2:]) + return fmt.Sprintf("package %v", path) +} + +func (pkg *Package) MarshalJSON() ([]byte, error) { + data := map[string]interface{}{ + "path": pkg.Path, + } + + if astJSON.GetOptions().MarshalOptions.IncludeLocation.Package { + if pkg.Location != nil { + data["location"] = pkg.Location + } + } + + return json.Marshal(data) +} + +// IsValidImportPath returns an error indicating if the import path is invalid. +// If the import path is valid, err is nil. +func IsValidImportPath(v Value) (err error) { + switch v := v.(type) { + case Var: + if !v.Equal(DefaultRootDocument.Value) && !v.Equal(InputRootDocument.Value) { + return fmt.Errorf("invalid path %v: path must begin with input or data", v) + } + case Ref: + if err := IsValidImportPath(v[0].Value); err != nil { + return fmt.Errorf("invalid path %v: path must begin with input or data", v) + } + for _, e := range v[1:] { + if _, ok := e.Value.(String); !ok { + return fmt.Errorf("invalid path %v: path elements must be strings", v) + } + } + default: + return fmt.Errorf("invalid path %v: path must be ref or var", v) + } + return nil +} + +// Compare returns an integer indicating whether imp is less than, equal to, +// or greater than other. +func (imp *Import) Compare(other *Import) int { + if imp == nil { + if other == nil { + return 0 + } + return -1 + } else if other == nil { + return 1 + } + if cmp := Compare(imp.Path, other.Path); cmp != 0 { + return cmp + } + + return VarCompare(imp.Alias, other.Alias) +} + +// Copy returns a deep copy of imp. +func (imp *Import) Copy() *Import { + cpy := *imp + cpy.Path = imp.Path.Copy() + return &cpy +} + +// Equal returns true if imp is equal to other. +func (imp *Import) Equal(other *Import) bool { + return imp.Compare(other) == 0 +} + +// Loc returns the location of the Import in the definition. +func (imp *Import) Loc() *Location { + if imp == nil { + return nil + } + return imp.Location +} + +// SetLoc sets the location on imp. +func (imp *Import) SetLoc(loc *Location) { + imp.Location = loc +} + +// Name returns the variable that is used to refer to the imported virtual +// document. This is the alias if defined otherwise the last element in the +// path. +func (imp *Import) Name() Var { + if len(imp.Alias) != 0 { + return imp.Alias + } + switch v := imp.Path.Value.(type) { + case Var: + return v + case Ref: + if len(v) == 1 { + return v[0].Value.(Var) + } + return Var(v[len(v)-1].Value.(String)) + } + panic("illegal import") +} + +func (imp *Import) String() string { + buf := []string{"import", imp.Path.String()} + if len(imp.Alias) > 0 { + buf = append(buf, "as", imp.Alias.String()) + } + return strings.Join(buf, " ") +} + +func (imp *Import) MarshalJSON() ([]byte, error) { + data := map[string]interface{}{ + "path": imp.Path, + } + + if len(imp.Alias) != 0 { + data["alias"] = imp.Alias + } + + if astJSON.GetOptions().MarshalOptions.IncludeLocation.Import { + if imp.Location != nil { + data["location"] = imp.Location + } + } + + return json.Marshal(data) +} + +// Compare returns an integer indicating whether rule is less than, equal to, +// or greater than other. 
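+//
+// A small sketch (assuming the package's MustParseRule helper):
+//
+//	a := MustParseRule(`p := 1`)
+//	b := MustParseRule(`q := 2`)
+//	_ = a.Compare(b) // negative: head "p" sorts before head "q"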
+func (rule *Rule) Compare(other *Rule) int { + if rule == nil { + if other == nil { + return 0 + } + return -1 + } else if other == nil { + return 1 + } + if cmp := rule.Head.Compare(other.Head); cmp != 0 { + return cmp + } + if rule.Default != other.Default { + if !rule.Default { + return -1 + } + return 1 + } + if cmp := rule.Body.Compare(other.Body); cmp != 0 { + return cmp + } + + if cmp := annotationsCompare(rule.Annotations, other.Annotations); cmp != 0 { + return cmp + } + + return rule.Else.Compare(other.Else) +} + +// Copy returns a deep copy of rule. +func (rule *Rule) Copy() *Rule { + cpy := *rule + cpy.Head = rule.Head.Copy() + cpy.Body = rule.Body.Copy() + + if len(cpy.Annotations) > 0 { + cpy.Annotations = make([]*Annotations, len(rule.Annotations)) + for i, a := range rule.Annotations { + cpy.Annotations[i] = a.Copy(&cpy) + } + } + + if cpy.Else != nil { + cpy.Else = rule.Else.Copy() + } + return &cpy +} + +// Equal returns true if rule is equal to other. +func (rule *Rule) Equal(other *Rule) bool { + return rule.Compare(other) == 0 +} + +// Loc returns the location of the Rule in the definition. +func (rule *Rule) Loc() *Location { + if rule == nil { + return nil + } + return rule.Location +} + +// SetLoc sets the location on rule. +func (rule *Rule) SetLoc(loc *Location) { + rule.Location = loc +} + +// Path returns a ref referring to the document produced by this rule. If rule +// is not contained in a module, this function panics. +// Deprecated: Poor handling of ref rules. Use `(*Rule).Ref()` instead. +func (rule *Rule) Path() Ref { + if rule.Module == nil { + panic("assertion failed") + } + return rule.Module.Package.Path.Extend(rule.Head.Ref().GroundPrefix()) +} + +// Ref returns a ref referring to the document produced by this rule. If rule +// is not contained in a module, this function panics. The returned ref may +// contain variables in the last position. 
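+//
+// For instance (illustrative): a rule with head `p.q[x]` in package
+// data.example yields the ref data.example.p.q[x], where the final
+// position is a variable.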
+func (rule *Rule) Ref() Ref { + if rule.Module == nil { + panic("assertion failed") + } + return rule.Module.Package.Path.Extend(rule.Head.Ref()) +} + +func (rule *Rule) String() string { + regoVersion := DefaultRegoVersion + if rule.Module != nil { + regoVersion = rule.Module.RegoVersion() + } + return rule.stringWithOpts(toStringOpts{regoVersion: regoVersion}) +} + +type toStringOpts struct { + regoVersion RegoVersion +} + +func (o toStringOpts) RegoVersion() RegoVersion { + if o.regoVersion == RegoUndefined { + return DefaultRegoVersion + } + return o.regoVersion +} + +func (rule *Rule) stringWithOpts(opts toStringOpts) string { + buf := []string{} + if rule.Default { + buf = append(buf, "default") + } + buf = append(buf, rule.Head.stringWithOpts(opts)) + if !rule.Default { + switch opts.RegoVersion() { + case RegoV1, RegoV0CompatV1: + buf = append(buf, "if") + } + buf = append(buf, "{", rule.Body.String(), "}") + } + if rule.Else != nil { + buf = append(buf, rule.Else.elseString(opts)) + } + return strings.Join(buf, " ") +} + +func (rule *Rule) isFunction() bool { + return len(rule.Head.Args) > 0 +} + +func (rule *Rule) MarshalJSON() ([]byte, error) { + data := map[string]interface{}{ + "head": rule.Head, + "body": rule.Body, + } + + if rule.Default { + data["default"] = true + } + + if rule.Else != nil { + data["else"] = rule.Else + } + + if astJSON.GetOptions().MarshalOptions.IncludeLocation.Rule { + if rule.Location != nil { + data["location"] = rule.Location + } + } + + if len(rule.Annotations) != 0 { + data["annotations"] = rule.Annotations + } + + return json.Marshal(data) +} + +func (rule *Rule) elseString(opts toStringOpts) string { + var buf []string + + buf = append(buf, "else") + + value := rule.Head.Value + if value != nil { + buf = append(buf, "=", value.String()) + } + + switch opts.RegoVersion() { + case RegoV1, RegoV0CompatV1: + buf = append(buf, "if") + } + + buf = append(buf, "{", rule.Body.String(), "}") + + if rule.Else != nil { + buf = append(buf, rule.Else.elseString(opts)) + } + + return strings.Join(buf, " ") +} + +// NewHead returns a new Head object. If args are provided, the first will be +// used for the key and the second will be used for the value. +func NewHead(name Var, args ...*Term) *Head { + head := &Head{ + Name: name, // backcompat + Reference: []*Term{NewTerm(name)}, + } + if len(args) == 0 { + return head + } + head.Key = args[0] + if len(args) == 1 { + return head + } + head.Value = args[1] + if head.Key != nil && head.Value != nil { + head.Reference = head.Reference.Append(args[0]) + } + return head +} + +// VarHead creates a head object, initializes its Name and Location and returns the new head. +// NOTE: The JSON options argument is no longer used, and kept only for backwards compatibility. +func VarHead(name Var, location *Location, _ *astJSON.Options) *Head { + h := NewHead(name) + h.Reference[0].Location = location + return h +} + +// RefHead returns a new Head object with the passed Ref. If args are provided, +// the first will be used for the value. +func RefHead(ref Ref, args ...*Term) *Head { + head := &Head{} + head.SetRef(ref) + if len(ref) < 2 { + head.Name = ref[0].Value.(Var) + } + if len(args) >= 1 { + head.Value = args[0] + } + return head +} + +// DocKind represents the collection of document types that can be produced by rules. +type DocKind byte + +const ( + // CompleteDoc represents a document that is completely defined by the rule. 
+	CompleteDoc = iota
+
+	// PartialSetDoc represents a set document that is partially defined by the rule.
+	PartialSetDoc
+
+	// PartialObjectDoc represents an object document that is partially defined by the rule.
+	PartialObjectDoc
+) // TODO(sr): Deprecate?
+
+// DocKind returns the type of document produced by this rule.
+func (head *Head) DocKind() DocKind {
+	if head.Key != nil {
+		if head.Value != nil {
+			return PartialObjectDoc
+		}
+		return PartialSetDoc
+	} else if head.HasDynamicRef() {
+		return PartialObjectDoc
+	}
+	return CompleteDoc
+}
+
+type RuleKind byte
+
+const (
+	SingleValue = iota
+	MultiValue
+)
+
+// RuleKind returns the type of rule this is.
+func (head *Head) RuleKind() RuleKind {
+	// NOTE(sr): This is a bit verbose, since the key is irrelevant for single vs.
+	// multi value, but it's as good a spot as any to assert the invariant.
+	switch {
+	case head.Value != nil:
+		return SingleValue
+	case head.Key != nil:
+		return MultiValue
+	default:
+		panic("unreachable")
+	}
+}
+
+// Ref returns the Ref of the rule. If it doesn't have one, it's filled in
+// via the Head's Name.
+func (head *Head) Ref() Ref {
+	if len(head.Reference) > 0 {
+		return head.Reference
+	}
+	return Ref{&Term{Value: head.Name}}
+}
+
+// SetRef can be used to set a rule head's Reference.
+func (head *Head) SetRef(r Ref) {
+	head.Reference = r
+}
+
+// Compare returns an integer indicating whether head is less than, equal to,
+// or greater than other.
+func (head *Head) Compare(other *Head) int {
+	if head == nil {
+		if other == nil {
+			return 0
+		}
+		return -1
+	} else if other == nil {
+		return 1
+	}
+	if head.Assign && !other.Assign {
+		return -1
+	} else if !head.Assign && other.Assign {
+		return 1
+	}
+	if cmp := Compare(head.Args, other.Args); cmp != 0 {
+		return cmp
+	}
+	if cmp := Compare(head.Reference, other.Reference); cmp != 0 {
+		return cmp
+	}
+	if cmp := VarCompare(head.Name, other.Name); cmp != 0 {
+		return cmp
+	}
+	if cmp := Compare(head.Key, other.Key); cmp != 0 {
+		return cmp
+	}
+	return Compare(head.Value, other.Value)
+}
+
+// Copy returns a deep copy of head.
+func (head *Head) Copy() *Head {
+	cpy := *head
+	cpy.Reference = head.Reference.Copy()
+	cpy.Args = head.Args.Copy()
+	cpy.Key = head.Key.Copy()
+	cpy.Value = head.Value.Copy()
+	cpy.keywords = nil
+	return &cpy
+}
+
+// Equal returns true if this head equals other.
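+//
+// An illustrative sketch of the invariant asserted by RuleKind above
+// (assuming the package's MustParseRule helper and v1 parser defaults):
+//
+//	MustParseRule(`p contains x if { x := 1 }`).Head.RuleKind() // MultiValue: Key is set
+//	MustParseRule(`p := 1`).Head.RuleKind()                     // SingleValue: Value is set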
+func (head *Head) Equal(other *Head) bool { + return head.Compare(other) == 0 +} + +func (head *Head) String() string { + return head.stringWithOpts(toStringOpts{}) +} + +func (head *Head) stringWithOpts(opts toStringOpts) string { + buf := strings.Builder{} + buf.WriteString(head.Ref().String()) + containsAdded := false + + switch { + case len(head.Args) != 0: + buf.WriteString(head.Args.String()) + case len(head.Reference) == 1 && head.Key != nil: + switch opts.RegoVersion() { + case RegoV0: + buf.WriteRune('[') + buf.WriteString(head.Key.String()) + buf.WriteRune(']') + default: + containsAdded = true + buf.WriteString(" contains ") + buf.WriteString(head.Key.String()) + } + } + if head.Value != nil { + if head.Assign { + buf.WriteString(" := ") + } else { + buf.WriteString(" = ") + } + buf.WriteString(head.Value.String()) + } else if !containsAdded && head.Name == "" && head.Key != nil { + buf.WriteString(" contains ") + buf.WriteString(head.Key.String()) + } + return buf.String() +} + +func (head *Head) MarshalJSON() ([]byte, error) { + var loc *Location + if astJSON.GetOptions().MarshalOptions.IncludeLocation.Head && head.Location != nil { + loc = head.Location + } + + // NOTE(sr): we do this to override the rendering of `head.Reference`. + // It's still what'll be used via the default means of encoding/json + // for unmarshaling a json object into a Head struct! + type h Head + return json.Marshal(struct { + h + Ref Ref `json:"ref"` + Location *Location `json:"location,omitempty"` + }{ + h: h(*head), + Ref: head.Ref(), + Location: loc, + }) +} + +// Vars returns a set of vars found in the head. +func (head *Head) Vars() VarSet { + vis := &VarVisitor{vars: VarSet{}} + // TODO: improve test coverage for this. + if head.Args != nil { + vis.Walk(head.Args) + } + if head.Key != nil { + vis.Walk(head.Key) + } + if head.Value != nil { + vis.Walk(head.Value) + } + if len(head.Reference) > 0 { + vis.Walk(head.Reference[1:]) + } + return vis.vars +} + +// Loc returns the Location of head. +func (head *Head) Loc() *Location { + if head == nil { + return nil + } + return head.Location +} + +// SetLoc sets the location on head. +func (head *Head) SetLoc(loc *Location) { + head.Location = loc +} + +func (head *Head) HasDynamicRef() bool { + pos := head.Reference.Dynamic() + return pos > 0 && (pos < len(head.Reference)) +} + +// Copy returns a deep copy of a. +func (a Args) Copy() Args { + cpy := Args{} + for _, t := range a { + cpy = append(cpy, t.Copy()) + } + return cpy +} + +func (a Args) String() string { + buf := make([]string, 0, len(a)) + for _, t := range a { + buf = append(buf, t.String()) + } + return "(" + strings.Join(buf, ", ") + ")" +} + +// Loc returns the Location of a. +func (a Args) Loc() *Location { + if len(a) == 0 { + return nil + } + return a[0].Location +} + +// SetLoc sets the location on a. +func (a Args) SetLoc(loc *Location) { + if len(a) != 0 { + a[0].SetLocation(loc) + } +} + +// Vars returns a set of vars that appear in a. +func (a Args) Vars() VarSet { + vis := &VarVisitor{vars: VarSet{}} + vis.Walk(a) + return vis.vars +} + +// NewBody returns a new Body containing the given expressions. The indices of +// the immediate expressions will be reset. +func NewBody(exprs ...*Expr) Body { + for i, expr := range exprs { + expr.Index = i + } + return Body(exprs) +} + +// MarshalJSON returns JSON encoded bytes representing body. +func (body Body) MarshalJSON() ([]byte, error) { + // Serialize empty Body to empty array. 
This handles both the empty case and the + // nil case (whereas by default the result would be null if body was nil.) + if len(body) == 0 { + return []byte(`[]`), nil + } + ret, err := json.Marshal([]*Expr(body)) + return ret, err +} + +// Append adds the expr to the body and updates the expr's index accordingly. +func (body *Body) Append(expr *Expr) { + n := len(*body) + expr.Index = n + *body = append(*body, expr) +} + +// Set sets the expr in the body at the specified position and updates the +// expr's index accordingly. +func (body Body) Set(expr *Expr, pos int) { + body[pos] = expr + expr.Index = pos +} + +// Compare returns an integer indicating whether body is less than, equal to, +// or greater than other. +// +// If body is a subset of other, it is considered less than (and vice versa). +func (body Body) Compare(other Body) int { + minLen := len(body) + if len(other) < minLen { + minLen = len(other) + } + for i := range minLen { + if cmp := body[i].Compare(other[i]); cmp != 0 { + return cmp + } + } + if len(body) < len(other) { + return -1 + } + if len(other) < len(body) { + return 1 + } + return 0 +} + +// Copy returns a deep copy of body. +func (body Body) Copy() Body { + cpy := make(Body, len(body)) + for i := range body { + cpy[i] = body[i].Copy() + } + return cpy +} + +// Contains returns true if this body contains the given expression. +func (body Body) Contains(x *Expr) bool { + return slices.ContainsFunc(body, x.Equal) +} + +// Equal returns true if this Body is equal to the other Body. +func (body Body) Equal(other Body) bool { + return body.Compare(other) == 0 +} + +// Hash returns the hash code for the Body. +func (body Body) Hash() int { + s := 0 + for _, e := range body { + s += e.Hash() + } + return s +} + +// IsGround returns true if all of the expressions in the Body are ground. +func (body Body) IsGround() bool { + for _, e := range body { + if !e.IsGround() { + return false + } + } + return true +} + +// Loc returns the location of the Body in the definition. +func (body Body) Loc() *Location { + if len(body) == 0 { + return nil + } + return body[0].Location +} + +// SetLoc sets the location on body. +func (body Body) SetLoc(loc *Location) { + if len(body) != 0 { + body[0].SetLocation(loc) + } +} + +func (body Body) String() string { + buf := make([]string, 0, len(body)) + for _, v := range body { + buf = append(buf, v.String()) + } + return strings.Join(buf, "; ") +} + +// Vars returns a VarSet containing variables in body. The params can be set to +// control which vars are included. +func (body Body) Vars(params VarVisitorParams) VarSet { + vis := NewVarVisitor().WithParams(params) + vis.Walk(body) + return vis.Vars() +} + +// NewExpr returns a new Expr object. +func NewExpr(terms interface{}) *Expr { + switch terms.(type) { + case *SomeDecl, *Every, *Term, []*Term: // ok + default: + panic("unreachable") + } + return &Expr{ + Negated: false, + Terms: terms, + Index: 0, + With: nil, + } +} + +// Complement returns a copy of this expression with the negation flag flipped. +func (expr *Expr) Complement() *Expr { + cpy := *expr + cpy.Negated = !cpy.Negated + return &cpy +} + +// ComplementNoWith returns a copy of this expression with the negation flag flipped +// and the with modifier removed. This is the same as calling .Complement().NoWith() +// but without making an intermediate copy. 
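+//
+// For example (sketch, assuming the package's MustParseExpr helper):
+//
+//	e := MustParseExpr(`input.x > 0`)
+//	ne := e.ComplementNoWith() // same terms, Negated flipped, With dropped
+//	_ = ne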
+func (expr *Expr) ComplementNoWith() *Expr { + cpy := *expr + cpy.Negated = !cpy.Negated + cpy.With = nil + return &cpy +} + +// Equal returns true if this Expr equals the other Expr. +func (expr *Expr) Equal(other *Expr) bool { + return expr.Compare(other) == 0 +} + +// Compare returns an integer indicating whether expr is less than, equal to, +// or greater than other. +// +// Expressions are compared as follows: +// +// 1. Declarations are always less than other expressions. +// 2. Preceding expression (by Index) is always less than the other expression. +// 3. Non-negated expressions are always less than negated expressions. +// 4. Single term expressions are always less than built-in expressions. +// +// Otherwise, the expression terms are compared normally. If both expressions +// have the same terms, the modifiers are compared. +func (expr *Expr) Compare(other *Expr) int { + + if expr == nil { + if other == nil { + return 0 + } + return -1 + } else if other == nil { + return 1 + } + + o1 := expr.sortOrder() + o2 := other.sortOrder() + if o1 < o2 { + return -1 + } else if o2 < o1 { + return 1 + } + + switch { + case expr.Index < other.Index: + return -1 + case expr.Index > other.Index: + return 1 + } + + switch { + case expr.Negated && !other.Negated: + return 1 + case !expr.Negated && other.Negated: + return -1 + } + + switch t := expr.Terms.(type) { + case *Term: + if cmp := Compare(t.Value, other.Terms.(*Term).Value); cmp != 0 { + return cmp + } + case []*Term: + if cmp := termSliceCompare(t, other.Terms.([]*Term)); cmp != 0 { + return cmp + } + case *SomeDecl: + if cmp := Compare(t, other.Terms.(*SomeDecl)); cmp != 0 { + return cmp + } + case *Every: + if cmp := Compare(t, other.Terms.(*Every)); cmp != 0 { + return cmp + } + } + + return withSliceCompare(expr.With, other.With) +} + +func (expr *Expr) sortOrder() int { + switch expr.Terms.(type) { + case *SomeDecl: + return 0 + case *Term: + return 1 + case []*Term: + return 2 + case *Every: + return 3 + } + return -1 +} + +// CopyWithoutTerms returns a deep copy of expr without its Terms +func (expr *Expr) CopyWithoutTerms() *Expr { + cpy := *expr + + if expr.With != nil { + cpy.With = make([]*With, len(expr.With)) + for i := range expr.With { + cpy.With[i] = expr.With[i].Copy() + } + } + + return &cpy +} + +// Copy returns a deep copy of expr. +func (expr *Expr) Copy() *Expr { + + cpy := expr.CopyWithoutTerms() + + switch ts := expr.Terms.(type) { + case *SomeDecl: + cpy.Terms = ts.Copy() + case []*Term: + cpy.Terms = termSliceCopy(ts) + case *Term: + cpy.Terms = ts.Copy() + case *Every: + cpy.Terms = ts.Copy() + } + + return cpy +} + +// Hash returns the hash code of the Expr. +func (expr *Expr) Hash() int { + s := expr.Index + switch ts := expr.Terms.(type) { + case *SomeDecl: + s += ts.Hash() + case []*Term: + for _, t := range ts { + s += t.Value.Hash() + } + case *Term: + s += ts.Value.Hash() + } + if expr.Negated { + s++ + } + for _, w := range expr.With { + s += w.Hash() + } + return s +} + +// IncludeWith returns a copy of expr with the with modifier appended. +func (expr *Expr) IncludeWith(target *Term, value *Term) *Expr { + cpy := *expr + cpy.With = append(cpy.With, &With{Target: target, Value: value}) + return &cpy +} + +// NoWith returns a copy of expr where the with modifier has been removed. +func (expr *Expr) NoWith() *Expr { + cpy := *expr + cpy.With = nil + return &cpy +} + +// IsEquality returns true if this is an equality expression. 
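+//
+// Note the distinction (illustrative sketch, assuming MustParseExpr):
+//
+//	MustParseExpr(`x = 1`).IsEquality()  // true: `=` is the global eq builtin
+//	MustParseExpr(`x == 1`).IsEquality() // false: `==` is the separate equal builtin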
+func (expr *Expr) IsEquality() bool {
+	return isGlobalBuiltin(expr, Var(Equality.Name))
+}
+
+// IsAssignment returns true if this is an assignment expression.
+func (expr *Expr) IsAssignment() bool {
+	return isGlobalBuiltin(expr, Var(Assign.Name))
+}
+
+// IsCall returns true if this expression calls a function.
+func (expr *Expr) IsCall() bool {
+	_, ok := expr.Terms.([]*Term)
+	return ok
+}
+
+// IsEvery returns true if this expression is an 'every' expression.
+func (expr *Expr) IsEvery() bool {
+	_, ok := expr.Terms.(*Every)
+	return ok
+}
+
+// IsSome returns true if this expression is a 'some' expression.
+func (expr *Expr) IsSome() bool {
+	_, ok := expr.Terms.(*SomeDecl)
+	return ok
+}
+
+// Operator returns the name of the function or built-in this expression refers
+// to. If this expression is not a function call, returns nil.
+func (expr *Expr) Operator() Ref {
+	op := expr.OperatorTerm()
+	if op == nil {
+		return nil
+	}
+	return op.Value.(Ref)
+}
+
+// OperatorTerm returns the name of the function or built-in this expression
+// refers to. If this expression is not a function call, returns nil.
+func (expr *Expr) OperatorTerm() *Term {
+	terms, ok := expr.Terms.([]*Term)
+	if !ok || len(terms) == 0 {
+		return nil
+	}
+	return terms[0]
+}
+
+// Operand returns the term at the zero-based pos. If the expr does not include
+// at least pos+1 terms, this function returns nil.
+func (expr *Expr) Operand(pos int) *Term {
+	terms, ok := expr.Terms.([]*Term)
+	if !ok {
+		return nil
+	}
+	idx := pos + 1
+	if idx < len(terms) {
+		return terms[idx]
+	}
+	return nil
+}
+
+// Operands returns the built-in function operands.
+func (expr *Expr) Operands() []*Term {
+	terms, ok := expr.Terms.([]*Term)
+	if !ok {
+		return nil
+	}
+	return terms[1:]
+}
+
+// IsGround returns true if all of the expression terms are ground.
+func (expr *Expr) IsGround() bool {
+	switch ts := expr.Terms.(type) {
+	case []*Term:
+		for _, t := range ts[1:] {
+			if !t.IsGround() {
+				return false
+			}
+		}
+	case *Term:
+		return ts.IsGround()
+	}
+	return true
+}
+
+// SetOperator sets the expr's operator and returns the expr itself. If expr is
+// not a call expr, this function will panic.
+func (expr *Expr) SetOperator(term *Term) *Expr {
+	expr.Terms.([]*Term)[0] = term
+	return expr
+}
+
+// SetLocation sets the expr's location and returns the expr itself.
+func (expr *Expr) SetLocation(loc *Location) *Expr {
+	expr.Location = loc
+	return expr
+}
+
+// Loc returns the Location of expr.
+func (expr *Expr) Loc() *Location {
+	if expr == nil {
+		return nil
+	}
+	return expr.Location
+}
+
+// SetLoc sets the location on expr.
+func (expr *Expr) SetLoc(loc *Location) { + expr.SetLocation(loc) +} + +func (expr *Expr) String() string { + buf := make([]string, 0, 2+len(expr.With)) + if expr.Negated { + buf = append(buf, "not") + } + switch t := expr.Terms.(type) { + case []*Term: + if expr.IsEquality() && validEqAssignArgCount(expr) { + buf = append(buf, fmt.Sprintf("%v %v %v", t[1], Equality.Infix, t[2])) + } else { + buf = append(buf, Call(t).String()) + } + case fmt.Stringer: + buf = append(buf, t.String()) + } + + for i := range expr.With { + buf = append(buf, expr.With[i].String()) + } + + return strings.Join(buf, " ") +} + +func (expr *Expr) MarshalJSON() ([]byte, error) { + data := map[string]interface{}{ + "terms": expr.Terms, + "index": expr.Index, + } + + if len(expr.With) > 0 { + data["with"] = expr.With + } + + if expr.Generated { + data["generated"] = true + } + + if expr.Negated { + data["negated"] = true + } + + if astJSON.GetOptions().MarshalOptions.IncludeLocation.Expr { + if expr.Location != nil { + data["location"] = expr.Location + } + } + + return json.Marshal(data) +} + +// UnmarshalJSON parses the byte array and stores the result in expr. +func (expr *Expr) UnmarshalJSON(bs []byte) error { + v := map[string]interface{}{} + if err := util.UnmarshalJSON(bs, &v); err != nil { + return err + } + return unmarshalExpr(expr, v) +} + +// Vars returns a VarSet containing variables in expr. The params can be set to +// control which vars are included. +func (expr *Expr) Vars(params VarVisitorParams) VarSet { + vis := NewVarVisitor().WithParams(params) + vis.Walk(expr) + return vis.Vars() +} + +// NewBuiltinExpr creates a new Expr object with the supplied terms. +// The builtin operator must be the first term. +func NewBuiltinExpr(terms ...*Term) *Expr { + return &Expr{Terms: terms} +} + +func (expr *Expr) CogeneratedExprs() []*Expr { + visited := map[*Expr]struct{}{} + visitCogeneratedExprs(expr, func(e *Expr) bool { + if expr.Equal(e) { + return true + } + if _, ok := visited[e]; ok { + return true + } + visited[e] = struct{}{} + return false + }) + + result := make([]*Expr, 0, len(visited)) + for e := range visited { + result = append(result, e) + } + return result +} + +func (expr *Expr) BaseCogeneratedExpr() *Expr { + if expr.generatedFrom == nil { + return expr + } + return expr.generatedFrom.BaseCogeneratedExpr() +} + +func visitCogeneratedExprs(expr *Expr, f func(*Expr) bool) { + if parent := expr.generatedFrom; parent != nil { + if stop := f(parent); !stop { + visitCogeneratedExprs(parent, f) + } + } + for _, child := range expr.generates { + if stop := f(child); !stop { + visitCogeneratedExprs(child, f) + } + } +} + +func (d *SomeDecl) String() string { + if call, ok := d.Symbols[0].Value.(Call); ok { + if len(call) == 4 { + return "some " + call[1].String() + ", " + call[2].String() + " in " + call[3].String() + } + return "some " + call[1].String() + " in " + call[2].String() + } + buf := make([]string, len(d.Symbols)) + for i := range buf { + buf[i] = d.Symbols[i].String() + } + return "some " + strings.Join(buf, ", ") +} + +// SetLoc sets the Location on d. +func (d *SomeDecl) SetLoc(loc *Location) { + d.Location = loc +} + +// Loc returns the Location of d. +func (d *SomeDecl) Loc() *Location { + return d.Location +} + +// Copy returns a deep copy of d. +func (d *SomeDecl) Copy() *SomeDecl { + cpy := *d + cpy.Symbols = termSliceCopy(d.Symbols) + return &cpy +} + +// Compare returns an integer indicating whether d is less than, equal to, or +// greater than other. 
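+//
+// For reference (illustrative sketch, assuming MustParseBody and v1 parser
+// defaults), the two shapes handled by String above:
+//
+//	MustParseBody(`some x, y`)[0].Terms.(*SomeDecl).String()          // "some x, y"
+//	MustParseBody(`some k, v in input`)[0].Terms.(*SomeDecl).String() // "some k, v in input"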
+func (d *SomeDecl) Compare(other *SomeDecl) int {
+	return termSliceCompare(d.Symbols, other.Symbols)
+}
+
+// Hash returns a hash code of d.
+func (d *SomeDecl) Hash() int {
+	return termSliceHash(d.Symbols)
+}
+
+func (d *SomeDecl) MarshalJSON() ([]byte, error) {
+	data := map[string]interface{}{
+		"symbols": d.Symbols,
+	}
+
+	if astJSON.GetOptions().MarshalOptions.IncludeLocation.SomeDecl {
+		if d.Location != nil {
+			data["location"] = d.Location
+		}
+	}
+
+	return json.Marshal(data)
+}
+
+func (q *Every) String() string {
+	if q.Key != nil {
+		return fmt.Sprintf("every %s, %s in %s { %s }",
+			q.Key,
+			q.Value,
+			q.Domain,
+			q.Body)
+	}
+	return fmt.Sprintf("every %s in %s { %s }",
+		q.Value,
+		q.Domain,
+		q.Body)
+}
+
+func (q *Every) Loc() *Location {
+	return q.Location
+}
+
+func (q *Every) SetLoc(l *Location) {
+	q.Location = l
+}
+
+// Copy returns a deep copy of q.
+func (q *Every) Copy() *Every {
+	cpy := *q
+	cpy.Key = q.Key.Copy()
+	cpy.Value = q.Value.Copy()
+	cpy.Domain = q.Domain.Copy()
+	cpy.Body = q.Body.Copy()
+	return &cpy
+}
+
+func (q *Every) Compare(other *Every) int {
+	for _, terms := range [][2]*Term{
+		{q.Key, other.Key},
+		{q.Value, other.Value},
+		{q.Domain, other.Domain},
+	} {
+		if d := Compare(terms[0], terms[1]); d != 0 {
+			return d
+		}
+	}
+	return q.Body.Compare(other.Body)
+}
+
+// KeyValueVars returns the key and val arguments of an `every`
+// expression, if they are non-nil and not wildcards.
+func (q *Every) KeyValueVars() VarSet {
+	vis := &VarVisitor{vars: VarSet{}}
+	if q.Key != nil {
+		vis.Walk(q.Key)
+	}
+	vis.Walk(q.Value)
+	return vis.vars
+}
+
+func (q *Every) MarshalJSON() ([]byte, error) {
+	data := map[string]interface{}{
+		"key":    q.Key,
+		"value":  q.Value,
+		"domain": q.Domain,
+		"body":   q.Body,
+	}
+
+	if astJSON.GetOptions().MarshalOptions.IncludeLocation.Every {
+		if q.Location != nil {
+			data["location"] = q.Location
+		}
+	}
+
+	return json.Marshal(data)
+}
+
+func (w *With) String() string {
+	return "with " + w.Target.String() + " as " + w.Value.String()
+}
+
+// Equal returns true if this With equals the other With.
+func (w *With) Equal(other *With) bool {
+	return Compare(w, other) == 0
+}
+
+// Compare returns an integer indicating whether w is less than, equal to, or
+// greater than other.
+func (w *With) Compare(other *With) int {
+	if w == nil {
+		if other == nil {
+			return 0
+		}
+		return -1
+	} else if other == nil {
+		return 1
+	}
+	if cmp := Compare(w.Target, other.Target); cmp != 0 {
+		return cmp
+	}
+	return Compare(w.Value, other.Value)
+}
+
+// Copy returns a deep copy of w.
+func (w *With) Copy() *With {
+	cpy := *w
+	cpy.Value = w.Value.Copy()
+	cpy.Target = w.Target.Copy()
+	return &cpy
+}
+
+// Hash returns the hash code of the With.
+func (w With) Hash() int {
+	return w.Target.Hash() + w.Value.Hash()
+}
+
+// SetLocation sets the location on w.
+func (w *With) SetLocation(loc *Location) *With {
+	w.Location = loc
+	return w
+}
+
+// Loc returns the Location of w.
+func (w *With) Loc() *Location {
+	if w == nil {
+		return nil
+	}
+	return w.Location
+}
+
+// SetLoc sets the location on w.
+func (w *With) SetLoc(loc *Location) {
+	w.Location = loc
+}
+
+func (w *With) MarshalJSON() ([]byte, error) {
+	data := map[string]interface{}{
+		"target": w.Target,
+		"value":  w.Value,
+	}
+
+	if astJSON.GetOptions().MarshalOptions.IncludeLocation.With {
+		if w.Location != nil {
+			data["location"] = w.Location
+		}
+	}
+
+	return json.Marshal(data)
+}
+
+// Copy returns a deep copy of the AST node x.
+// If x is not an AST node, x is returned unmodified.
+func Copy(x interface{}) interface{} {
+	switch x := x.(type) {
+	case *Module:
+		return x.Copy()
+	case *Package:
+		return x.Copy()
+	case *Import:
+		return x.Copy()
+	case *Rule:
+		return x.Copy()
+	case *Head:
+		return x.Copy()
+	case Args:
+		return x.Copy()
+	case Body:
+		return x.Copy()
+	case *Expr:
+		return x.Copy()
+	case *With:
+		return x.Copy()
+	case *SomeDecl:
+		return x.Copy()
+	case *Every:
+		return x.Copy()
+	case *Term:
+		return x.Copy()
+	case *ArrayComprehension:
+		return x.Copy()
+	case *SetComprehension:
+		return x.Copy()
+	case *ObjectComprehension:
+		return x.Copy()
+	case Set:
+		return x.Copy()
+	case *object:
+		return x.Copy()
+	case *Array:
+		return x.Copy()
+	case Ref:
+		return x.Copy()
+	case Call:
+		return x.Copy()
+	case *Comment:
+		return x.Copy()
+	}
+	return x
+}
+
+// RuleSet represents a collection of rules that produce a virtual document.
+type RuleSet []*Rule
+
+// NewRuleSet returns a new RuleSet containing the given rules.
+func NewRuleSet(rules ...*Rule) RuleSet {
+	rs := make(RuleSet, 0, len(rules))
+	for _, rule := range rules {
+		rs.Add(rule)
+	}
+	return rs
+}
+
+// Add inserts the rule into rs.
+func (rs *RuleSet) Add(rule *Rule) {
+	for _, exist := range *rs {
+		if exist.Equal(rule) {
+			return
+		}
+	}
+	*rs = append(*rs, rule)
+}
+
+// Contains returns true if rs contains rule.
+func (rs RuleSet) Contains(rule *Rule) bool {
+	for i := range rs {
+		if rs[i].Equal(rule) {
+			return true
+		}
+	}
+	return false
+}
+
+// Diff returns a new RuleSet containing rules in rs that are not in other.
+func (rs RuleSet) Diff(other RuleSet) RuleSet {
+	result := NewRuleSet()
+	for i := range rs {
+		if !other.Contains(rs[i]) {
+			result.Add(rs[i])
+		}
+	}
+	return result
+}
+
+// Equal returns true if rs equals other.
+func (rs RuleSet) Equal(other RuleSet) bool {
+	return len(rs.Diff(other)) == 0 && len(other.Diff(rs)) == 0
+}
+
+// Merge returns a ruleset containing the union of rules from rs and other.
+func (rs RuleSet) Merge(other RuleSet) RuleSet {
+	result := NewRuleSet()
+	for i := range rs {
+		result.Add(rs[i])
+	}
+	for i := range other {
+		result.Add(other[i])
+	}
+	return result
+}
+
+func (rs RuleSet) String() string {
+	buf := make([]string, 0, len(rs))
+	for _, rule := range rs {
+		buf = append(buf, rule.String())
+	}
+	return "{" + strings.Join(buf, ", ") + "}"
+}
+
+// Returns true if the equality or assignment expression referred to by expr
+// has a valid number of arguments.
+func validEqAssignArgCount(expr *Expr) bool {
+	return len(expr.Operands()) == 2
+}
+
+// this function checks if the expr refers to a non-namespaced (global) built-in
+// function like eq, gt, plus, etc.
+func isGlobalBuiltin(expr *Expr, name Var) bool {
+	terms, ok := expr.Terms.([]*Term)
+	if !ok {
+		return false
+	}
+
+	// NOTE(tsandall): do not use Term#Equal or Value#Compare to avoid
+	// allocation here.
+	ref, ok := terms[0].Value.(Ref)
+	if !ok || len(ref) != 1 {
+		return false
+	}
+	if head, ok := ref[0].Value.(Var); ok {
+		return head.Equal(name)
+	}
+	return false
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/pretty.go b/vendor/github.com/open-policy-agent/opa/v1/ast/pretty.go
new file mode 100644
index 00000000000..b4f05ad5015
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/pretty.go
@@ -0,0 +1,82 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+	"fmt"
+	"io"
+	"strings"
+)
+
+// Pretty writes a pretty representation of the AST rooted at x to w.
+//
+// This function is intended for debug purposes when inspecting ASTs.
+func Pretty(w io.Writer, x interface{}) {
+	pp := &prettyPrinter{
+		depth: -1,
+		w:     w,
+	}
+	NewBeforeAfterVisitor(pp.Before, pp.After).Walk(x)
+}
+
+type prettyPrinter struct {
+	depth int
+	w     io.Writer
+}
+
+func (pp *prettyPrinter) Before(x interface{}) bool {
+	switch x.(type) {
+	case *Term:
+	default:
+		pp.depth++
+	}
+
+	switch x := x.(type) {
+	case *Term:
+		return false
+	case Args:
+		if len(x) == 0 {
+			return false
+		}
+		pp.writeType(x)
+	case *Expr:
+		extras := []string{}
+		if x.Negated {
+			extras = append(extras, "negated")
+		}
+		extras = append(extras, fmt.Sprintf("index=%d", x.Index))
+		pp.writeIndent("%v %v", TypeName(x), strings.Join(extras, " "))
+	case Null, Boolean, Number, String, Var:
+		pp.writeValue(x)
+	default:
+		pp.writeType(x)
+	}
+	return false
+}
+
+func (pp *prettyPrinter) After(x interface{}) {
+	switch x.(type) {
+	case *Term:
+	default:
+		pp.depth--
+	}
+}
+
+func (pp *prettyPrinter) writeValue(x interface{}) {
+	pp.writeIndent(fmt.Sprint(x))
+}
+
+func (pp *prettyPrinter) writeType(x interface{}) {
+	pp.writeIndent(TypeName(x))
+}
+
+func (pp *prettyPrinter) writeIndent(f string, a ...interface{}) {
+	pad := strings.Repeat(" ", pp.depth)
+	pp.write(pad+f, a...)
+}
+
+func (pp *prettyPrinter) write(f string, a ...interface{}) {
+	fmt.Fprintf(pp.w, f+"\n", a...)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/rego_v1.go b/vendor/github.com/open-policy-agent/opa/v1/ast/rego_v1.go
similarity index 96%
rename from vendor/github.com/open-policy-agent/opa/ast/rego_v1.go
rename to vendor/github.com/open-policy-agent/opa/v1/ast/rego_v1.go
index b64dfce7bef..8b757ecc3c3 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/rego_v1.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/rego_v1.go
@@ -3,7 +3,7 @@ package ast
 import (
 	"fmt"
 
-	"github.com/open-policy-agent/opa/ast/internal/tokens"
+	"github.com/open-policy-agent/opa/v1/ast/internal/tokens"
 )
 
 func checkDuplicateImports(modules []*Module) (errors Errors) {
@@ -192,7 +192,7 @@ func checkRegoV1Rule(rule *Rule, opts RegoCheckOptions) Errors {
 	var errs Errors
 
 	if opts.NoKeywordsAsRuleNames && IsKeywordInRegoVersion(rule.Head.Name.String(), RegoV1) {
-		errs = append(errs, NewError(ParseErr, rule.Location, fmt.Sprintf("%s keyword cannot be used for rule name", rule.Head.Name.String())))
+		errs = append(errs, NewError(ParseErr, rule.Location, "%s keyword cannot be used for rule name", rule.Head.Name.String()))
 	}
 	if opts.RequireRuleBodyOrValue && rule.generatedBody && rule.Head.generatedValue {
 		errs = append(errs, NewError(ParseErr, rule.Location, "%s must have value assignment and/or body declaration", t))
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/schema.go b/vendor/github.com/open-policy-agent/opa/v1/ast/schema.go
new file mode 100644
index 00000000000..3f9e2001d5a
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/schema.go
@@ -0,0 +1,54 @@
+// Copyright 2021 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+	"fmt"
+
+	"github.com/open-policy-agent/opa/v1/types"
+	"github.com/open-policy-agent/opa/v1/util"
+)
+
+// SchemaSet holds a map from a path to a schema.
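+//
+// A usage sketch (illustrative; the raw value is typically a decoded JSON
+// schema document, and MustParseRef is assumed from this package):
+//
+//	ss := NewSchemaSet()
+//	ss.Put(MustParseRef("schema.input"), map[string]any{"type": "object"})
+//	raw := ss.Get(MustParseRef("schema.input")) // nil if no schema at that path
+//	_ = raw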
+// SchemaSet holds a map from a path to a schema.
+type SchemaSet struct {
+	m *util.HasherMap[Ref, any]
+}
+
+// NewSchemaSet returns an empty SchemaSet.
+func NewSchemaSet() *SchemaSet {
+	return &SchemaSet{
+		m: util.NewHasherMap[Ref, any](RefEqual),
+	}
+}
+
+// Put inserts a raw schema into the set.
+func (ss *SchemaSet) Put(path Ref, raw any) {
+	ss.m.Put(path, raw)
+}
+
+// Get returns the raw schema identified by the path.
+func (ss *SchemaSet) Get(path Ref) any {
+	if ss != nil {
+		if x, ok := ss.m.Get(path); ok {
+			return x
+		}
+	}
+	return nil
+}
+
+func loadSchema(raw any, allowNet []string) (types.Type, error) {
+
+	jsonSchema, err := compileSchema(raw, allowNet)
+	if err != nil {
+		return nil, err
+	}
+
+	tpe, err := newSchemaParser().parseSchema(jsonSchema.RootSchema)
+	if err != nil {
+		return nil, fmt.Errorf("type checking: %w", err)
+	}
+
+	return tpe, nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/strings.go b/vendor/github.com/open-policy-agent/opa/v1/ast/strings.go
new file mode 100644
index 00000000000..40d66753f53
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/strings.go
@@ -0,0 +1,54 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+	"reflect"
+	"strings"
+)
+
+// TypeName returns a human-readable name for the AST element type.
+func TypeName(x interface{}) string {
+	if _, ok := x.(*lazyObj); ok {
+		return "object"
+	}
+	return strings.ToLower(reflect.Indirect(reflect.ValueOf(x)).Type().Name())
+}
+
+// ValueName returns a human-readable name for the AST Value type.
+// This is preferable over calling TypeName when the argument is known to be
+// a Value, as this doesn't require reflection (= heap allocations).
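// Editor's sketch (not part of the upstream patch): storing and retrieving a
// raw schema with the SchemaSet defined above. The path Ref is built by hand;
// assumes `import "github.com/open-policy-agent/opa/v1/ast"`.
func exampleSchemaSet() {
	path := ast.Ref{ast.VarTerm("schema"), ast.StringTerm("input")}

	ss := ast.NewSchemaSet()
	ss.Put(path, map[string]any{"type": "object"}) // any JSON-like value works

	if raw := ss.Get(path); raw != nil {
		_ = raw // the same map; Get returns nil for unknown paths
	}
}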
+func ValueName(x Value) string { + switch x.(type) { + case String: + return "string" + case Boolean: + return "boolean" + case Number: + return "number" + case Null: + return "null" + case Var: + return "var" + case Object: + return "object" + case Set: + return "set" + case Ref: + return "ref" + case Call: + return "call" + case *Array: + return "array" + case *ArrayComprehension: + return "arraycomprehension" + case *ObjectComprehension: + return "objectcomprehension" + case *SetComprehension: + return "setcomprehension" + } + + return TypeName(x) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/syncpools.go b/vendor/github.com/open-policy-agent/opa/v1/ast/syncpools.go new file mode 100644 index 00000000000..cb150d39b52 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/syncpools.go @@ -0,0 +1,69 @@ +package ast + +import ( + "strings" + "sync" +) + +type termPtrPool struct { + pool sync.Pool +} + +type stringBuilderPool struct { + pool sync.Pool +} + +type indexResultPool struct { + pool sync.Pool +} + +func (p *termPtrPool) Get() *Term { + return p.pool.Get().(*Term) +} + +func (p *termPtrPool) Put(t *Term) { + p.pool.Put(t) +} + +func (p *stringBuilderPool) Get() *strings.Builder { + return p.pool.Get().(*strings.Builder) +} + +func (p *stringBuilderPool) Put(sb *strings.Builder) { + sb.Reset() + p.pool.Put(sb) +} + +func (p *indexResultPool) Get() *IndexResult { + return p.pool.Get().(*IndexResult) +} + +func (p *indexResultPool) Put(x *IndexResult) { + if x != nil { + p.pool.Put(x) + } +} + +var TermPtrPool = &termPtrPool{ + pool: sync.Pool{ + New: func() any { + return &Term{} + }, + }, +} + +var sbPool = &stringBuilderPool{ + pool: sync.Pool{ + New: func() any { + return &strings.Builder{} + }, + }, +} + +var IndexResultPool = &indexResultPool{ + pool: sync.Pool{ + New: func() any { + return &IndexResult{} + }, + }, +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/term.go b/vendor/github.com/open-policy-agent/opa/v1/ast/term.go new file mode 100644 index 00000000000..e0fda51e895 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/term.go @@ -0,0 +1,3411 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// nolint: deadcode // Public API. +package ast + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "math/big" + "net/url" + "regexp" + "slices" + "strconv" + "strings" + "sync" + + "github.com/cespare/xxhash/v2" + + astJSON "github.com/open-policy-agent/opa/v1/ast/json" + "github.com/open-policy-agent/opa/v1/ast/location" + "github.com/open-policy-agent/opa/v1/util" +) + +var errFindNotFound = errors.New("find: not found") + +// Location records a position in source code. +type Location = location.Location + +// NewLocation returns a new Location object. +func NewLocation(text []byte, file string, row int, col int) *Location { + return location.NewLocation(text, file, row, col) +} + +// Value declares the common interface for all Term values. Every kind of Term value +// in the language is represented as a type that implements this interface: +// +// - Null, Boolean, Number, String +// - Object, Array, Set +// - Variables, References +// - Array, Set, and Object Comprehensions +// - Calls +type Value interface { + Compare(other Value) int // Compare returns <0, 0, or >0 if this Value is less than, equal to, or greater than other, respectively. 
+ Find(path Ref) (Value, error) // Find returns value referred to by path or an error if path is not found. + Hash() int // Returns hash code of the value. + IsGround() bool // IsGround returns true if this value is not a variable or contains no variables. + String() string // String returns a human readable string representation of the value. +} + +// InterfaceToValue converts a native Go value x to a Value. +func InterfaceToValue(x interface{}) (Value, error) { + switch x := x.(type) { + case Value: + return x, nil + case nil: + return NullValue, nil + case bool: + return InternedBooleanTerm(x).Value, nil + case json.Number: + if interned := InternedIntNumberTermFromString(string(x)); interned != nil { + return interned.Value, nil + } + return Number(x), nil + case int64: + return int64Number(x), nil + case uint64: + return uint64Number(x), nil + case float64: + return floatNumber(x), nil + case int: + return intNumber(x), nil + case string: + return String(x), nil + case []any: + r := util.NewPtrSlice[Term](len(x)) + for i, e := range x { + e, err := InterfaceToValue(e) + if err != nil { + return nil, err + } + r[i].Value = e + } + return NewArray(r...), nil + case []string: + r := util.NewPtrSlice[Term](len(x)) + for i, e := range x { + r[i].Value = String(e) + } + return NewArray(r...), nil + case map[string]any: + kvs := util.NewPtrSlice[Term](len(x) * 2) + idx := 0 + for k, v := range x { + kvs[idx].Value = String(k) + v, err := InterfaceToValue(v) + if err != nil { + return nil, err + } + kvs[idx+1].Value = v + idx += 2 + } + tuples := make([][2]*Term, len(kvs)/2) + for i := 0; i < len(kvs); i += 2 { + tuples[i/2] = *(*[2]*Term)(kvs[i : i+2]) + } + return NewObject(tuples...), nil + case map[string]string: + r := newobject(len(x)) + for k, v := range x { + r.Insert(StringTerm(k), StringTerm(v)) + } + return r, nil + default: + ptr := util.Reference(x) + if err := util.RoundTrip(ptr); err != nil { + return nil, fmt.Errorf("ast: interface conversion: %w", err) + } + return InterfaceToValue(*ptr) + } +} + +// ValueFromReader returns an AST value from a JSON serialized value in the reader. +func ValueFromReader(r io.Reader) (Value, error) { + var x interface{} + if err := util.NewJSONDecoder(r).Decode(&x); err != nil { + return nil, err + } + return InterfaceToValue(x) +} + +// As converts v into a Go native type referred to by x. +func As(v Value, x interface{}) error { + return util.NewJSONDecoder(strings.NewReader(v.String())).Decode(x) +} + +// Resolver defines the interface for resolving references to native Go values. +type Resolver interface { + Resolve(Ref) (interface{}, error) +} + +// ValueResolver defines the interface for resolving references to AST values. +type ValueResolver interface { + Resolve(Ref) (Value, error) +} + +// UnknownValueErr indicates a ValueResolver was unable to resolve a reference +// because the reference refers to an unknown value. +type UnknownValueErr struct{} + +func (UnknownValueErr) Error() string { + return "unknown value" +} + +// IsUnknownValueErr returns true if the err is an UnknownValueErr. +func IsUnknownValueErr(err error) bool { + _, ok := err.(UnknownValueErr) + return ok +} + +type illegalResolver struct{} + +func (illegalResolver) Resolve(ref Ref) (interface{}, error) { + return nil, fmt.Errorf("illegal value: %v", ref) +} + +// ValueToInterface returns the Go representation of an AST value. The AST +// value should not contain any values that require evaluation (e.g., vars, +// comprehensions, etc.) 
+func ValueToInterface(v Value, resolver Resolver) (interface{}, error) { + return valueToInterface(v, resolver, JSONOpt{}) +} + +func valueToInterface(v Value, resolver Resolver, opt JSONOpt) (interface{}, error) { + switch v := v.(type) { + case Null: + return nil, nil + case Boolean: + return bool(v), nil + case Number: + return json.Number(v), nil + case String: + return string(v), nil + case *Array: + buf := []interface{}{} + for i := range v.Len() { + x1, err := valueToInterface(v.Elem(i).Value, resolver, opt) + if err != nil { + return nil, err + } + buf = append(buf, x1) + } + return buf, nil + case *object: + buf := make(map[string]interface{}, v.Len()) + err := v.Iter(func(k, v *Term) error { + ki, err := valueToInterface(k.Value, resolver, opt) + if err != nil { + return err + } + var str string + var ok bool + if str, ok = ki.(string); !ok { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(ki); err != nil { + return err + } + str = strings.TrimSpace(buf.String()) + } + vi, err := valueToInterface(v.Value, resolver, opt) + if err != nil { + return err + } + buf[str] = vi + return nil + }) + if err != nil { + return nil, err + } + return buf, nil + case *lazyObj: + if opt.CopyMaps { + return valueToInterface(v.force(), resolver, opt) + } + return v.native, nil + case Set: + buf := []interface{}{} + iter := func(x *Term) error { + x1, err := valueToInterface(x.Value, resolver, opt) + if err != nil { + return err + } + buf = append(buf, x1) + return nil + } + var err error + if opt.SortSets { + err = v.Sorted().Iter(iter) + } else { + err = v.Iter(iter) + } + if err != nil { + return nil, err + } + return buf, nil + case Ref: + return resolver.Resolve(v) + default: + return nil, fmt.Errorf("%v requires evaluation", TypeName(v)) + } +} + +// JSON returns the JSON representation of v. The value must not contain any +// refs or terms that require evaluation (e.g., vars, comprehensions, etc.) +func JSON(v Value) (interface{}, error) { + return JSONWithOpt(v, JSONOpt{}) +} + +// JSONOpt defines parameters for AST to JSON conversion. +type JSONOpt struct { + SortSets bool // sort sets before serializing (this makes conversion more expensive) + CopyMaps bool // enforces copying of map[string]interface{} read from the store +} + +// JSONWithOpt returns the JSON representation of v. The value must not contain any +// refs or terms that require evaluation (e.g., vars, comprehensions, etc.) +func JSONWithOpt(v Value, opt JSONOpt) (interface{}, error) { + return valueToInterface(v, illegalResolver{}, opt) +} + +// MustJSON returns the JSON representation of v. The value must not contain any +// refs or terms that require evaluation (e.g., vars, comprehensions, etc.) If +// the conversion fails, this function will panic. This function is mostly for +// test purposes. +func MustJSON(v Value) interface{} { + r, err := JSON(v) + if err != nil { + panic(err) + } + return r +} + +// MustInterfaceToValue converts a native Go value x to a Value. If the +// conversion fails, this function will panic. This function is mostly for test +// purposes. +func MustInterfaceToValue(x interface{}) Value { + v, err := InterfaceToValue(x) + if err != nil { + panic(err) + } + return v +} + +// Term is an argument to a function. +type Term struct { + Value Value `json:"value"` // the value of the Term as represented in Go + Location *Location `json:"location,omitempty"` // the location of the Term in the source +} + +// NewTerm returns a new Term object. 
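// Editor's sketch (not part of the upstream patch): round-tripping a native Go
// value through the AST with InterfaceToValue and JSONWithOpt from above.
// Assumes `import "github.com/open-policy-agent/opa/v1/ast"`.
func exampleRoundTrip() {
	v, err := ast.InterfaceToValue(map[string]any{"users": []any{"alice", "bob"}})
	if err != nil {
		panic(err)
	}

	// Convert back; SortSets makes any set output deterministic.
	out, err := ast.JSONWithOpt(v, ast.JSONOpt{SortSets: true})
	if err != nil {
		panic(err)
	}
	_ = out // map[string]interface{}{"users": []interface{}{"alice", "bob"}}
}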
+func NewTerm(v Value) *Term { + return &Term{ + Value: v, + } +} + +// SetLocation updates the term's Location and returns the term itself. +func (term *Term) SetLocation(loc *Location) *Term { + term.Location = loc + return term +} + +// Loc returns the Location of term. +func (term *Term) Loc() *Location { + if term == nil { + return nil + } + return term.Location +} + +// SetLoc sets the location on term. +func (term *Term) SetLoc(loc *Location) { + term.SetLocation(loc) +} + +// Copy returns a deep copy of term. +func (term *Term) Copy() *Term { + if term == nil { + return nil + } + + cpy := *term + + switch v := term.Value.(type) { + case Null, Boolean, Number, String, Var: + cpy.Value = v + case Ref: + cpy.Value = v.Copy() + case *Array: + cpy.Value = v.Copy() + case Set: + cpy.Value = v.Copy() + case *object: + cpy.Value = v.Copy() + case *ArrayComprehension: + cpy.Value = v.Copy() + case *ObjectComprehension: + cpy.Value = v.Copy() + case *SetComprehension: + cpy.Value = v.Copy() + case Call: + cpy.Value = v.Copy() + } + + return &cpy +} + +// Equal returns true if this term equals the other term. Equality is +// defined for each kind of term, and does not compare the Location. +func (term *Term) Equal(other *Term) bool { + if term == nil && other != nil { + return false + } + if term != nil && other == nil { + return false + } + if term == other { + return true + } + + return ValueEqual(term.Value, other.Value) +} + +// Get returns a value referred to by name from the term. +func (term *Term) Get(name *Term) *Term { + switch v := term.Value.(type) { + case *object: + return v.Get(name) + case *Array: + return v.Get(name) + case interface { + Get(*Term) *Term + }: + return v.Get(name) + case Set: + if v.Contains(name) { + return name + } + } + return nil +} + +// Hash returns the hash code of the Term's Value. Its Location +// is ignored. +func (term *Term) Hash() int { + return term.Value.Hash() +} + +// IsGround returns true if this term's Value is ground. +func (term *Term) IsGround() bool { + return term.Value.IsGround() +} + +// MarshalJSON returns the JSON encoding of the term. +// +// Specialized marshalling logic is required to include a type hint for Value. +func (term *Term) MarshalJSON() ([]byte, error) { + d := map[string]interface{}{ + "type": ValueName(term.Value), + "value": term.Value, + } + jsonOptions := astJSON.GetOptions().MarshalOptions + if jsonOptions.IncludeLocation.Term { + if term.Location != nil { + d["location"] = term.Location + } + } + return json.Marshal(d) +} + +func (term *Term) String() string { + return term.Value.String() +} + +// UnmarshalJSON parses the byte array and stores the result in term. +// Specialized unmarshalling is required to handle Value and Location. +func (term *Term) UnmarshalJSON(bs []byte) error { + v := map[string]interface{}{} + if err := util.UnmarshalJSON(bs, &v); err != nil { + return err + } + val, err := unmarshalValue(v) + if err != nil { + return err + } + term.Value = val + + if loc, ok := v["location"].(map[string]interface{}); ok { + term.Location = &Location{} + err := unmarshalLocation(term.Location, loc) + if err != nil { + return err + } + } + return nil +} + +// Vars returns a VarSet with variables contained in this term. +func (term *Term) Vars() VarSet { + vis := &VarVisitor{vars: VarSet{}} + vis.Walk(term) + return vis.vars +} + +// IsConstant returns true if the AST value is constant. 
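// Editor's sketch (not part of the upstream patch): a few of the Term methods
// above. Assumes imports of "fmt" and "github.com/open-policy-agent/opa/v1/ast",
// plus the package's ast.MustParseTerm helper.
func exampleTermOps() {
	t := ast.MustParseTerm(`{"a": [1, 2], "b": x}`)

	fmt.Println(t.Get(ast.StringTerm("a"))) // [1, 2]
	fmt.Println(t.IsGround())               // false: the object contains the variable x
	fmt.Println(t.Vars())                   // VarSet containing x
	fmt.Println(t.Copy().Equal(t))          // true: equality ignores Location, not structure
}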
+func IsConstant(v Value) bool { + found := false + vis := GenericVisitor{ + func(x interface{}) bool { + switch x.(type) { + case Var, Ref, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Call: + found = true + return true + } + return false + }, + } + vis.Walk(v) + return !found +} + +// IsComprehension returns true if the supplied value is a comprehension. +func IsComprehension(x Value) bool { + switch x.(type) { + case *ArrayComprehension, *ObjectComprehension, *SetComprehension: + return true + } + return false +} + +// ContainsRefs returns true if the Value v contains refs. +func ContainsRefs(v interface{}) bool { + found := false + WalkRefs(v, func(Ref) bool { + found = true + return found + }) + return found +} + +// ContainsComprehensions returns true if the Value v contains comprehensions. +func ContainsComprehensions(v interface{}) bool { + found := false + WalkClosures(v, func(x interface{}) bool { + switch x.(type) { + case *ArrayComprehension, *ObjectComprehension, *SetComprehension: + found = true + return found + } + return found + }) + return found +} + +// ContainsClosures returns true if the Value v contains closures. +func ContainsClosures(v interface{}) bool { + found := false + WalkClosures(v, func(x interface{}) bool { + switch x.(type) { + case *ArrayComprehension, *ObjectComprehension, *SetComprehension, *Every: + found = true + return found + } + return found + }) + return found +} + +// IsScalar returns true if the AST value is a scalar. +func IsScalar(v Value) bool { + switch v.(type) { + case String, Number, Boolean, Null: + return true + } + return false +} + +// Null represents the null value defined by JSON. +type Null struct{} + +var NullValue Value = Null{} + +// NullTerm creates a new Term with a Null value. +func NullTerm() *Term { + return &Term{Value: NullValue} +} + +// Equal returns true if the other term Value is also Null. +func (Null) Equal(other Value) bool { + switch other.(type) { + case Null: + return true + default: + return false + } +} + +// Compare compares null to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. +func (Null) Compare(other Value) int { + if _, ok := other.(Null); ok { + return 0 + } + return -1 +} + +// Find returns the current value or a not found error. +func (Null) Find(path Ref) (Value, error) { + if len(path) == 0 { + return NullValue, nil + } + return nil, errFindNotFound +} + +// Hash returns the hash code for the Value. +func (Null) Hash() int { + return 0 +} + +// IsGround always returns true. +func (Null) IsGround() bool { + return true +} + +func (Null) String() string { + return "null" +} + +// Boolean represents a boolean value defined by JSON. +type Boolean bool + +// BooleanTerm creates a new Term with a Boolean value. +func BooleanTerm(b bool) *Term { + if b { + return &Term{Value: InternedBooleanTerm(true).Value} + } + return &Term{Value: InternedBooleanTerm(false).Value} +} + +// Equal returns true if the other Value is a Boolean and is equal. +func (bol Boolean) Equal(other Value) bool { + switch other := other.(type) { + case Boolean: + return bol == other + default: + return false + } +} + +// Compare compares bol to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. 
+func (bol Boolean) Compare(other Value) int { + switch other := other.(type) { + case Boolean: + if bol == other { + return 0 + } + if !bol { + return -1 + } + return 1 + case Null: + return 1 + } + + return -1 +} + +// Find returns the current value or a not found error. +func (bol Boolean) Find(path Ref) (Value, error) { + if len(path) == 0 { + return InternedBooleanTerm(bool(bol)).Value, nil + } + return nil, errFindNotFound +} + +// Hash returns the hash code for the Value. +func (bol Boolean) Hash() int { + if bol { + return 1 + } + return 0 +} + +// IsGround always returns true. +func (Boolean) IsGround() bool { + return true +} + +func (bol Boolean) String() string { + return strconv.FormatBool(bool(bol)) +} + +// Number represents a numeric value as defined by JSON. +type Number json.Number + +// NumberTerm creates a new Term with a Number value. +func NumberTerm(n json.Number) *Term { + return &Term{Value: Number(n)} +} + +// IntNumberTerm creates a new Term with an integer Number value. +func IntNumberTerm(i int) *Term { + return &Term{Value: Number(strconv.Itoa(i))} +} + +// UIntNumberTerm creates a new Term with an unsigned integer Number value. +func UIntNumberTerm(u uint64) *Term { + return &Term{Value: uint64Number(u)} +} + +// FloatNumberTerm creates a new Term with a floating point Number value. +func FloatNumberTerm(f float64) *Term { + s := strconv.FormatFloat(f, 'g', -1, 64) + return &Term{Value: Number(s)} +} + +// Equal returns true if the other Value is a Number and is equal. +func (num Number) Equal(other Value) bool { + switch other := other.(type) { + case Number: + if n1, ok1 := num.Int64(); ok1 { + n2, ok2 := other.Int64() + if ok1 && ok2 && n1 == n2 { + return true + } + } + + return num.Compare(other) == 0 + default: + return false + } +} + +// Compare compares num to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. +func (num Number) Compare(other Value) int { + // Optimize for the common case, as calling Compare allocates on heap. + if otherNum, yes := other.(Number); yes { + if ai, ok := num.Int64(); ok { + if bi, ok := otherNum.Int64(); ok { + if ai == bi { + return 0 + } + if ai < bi { + return -1 + } + return 1 + } + } + } + + return Compare(num, other) +} + +// Find returns the current value or a not found error. +func (num Number) Find(path Ref) (Value, error) { + if len(path) == 0 { + return num, nil + } + return nil, errFindNotFound +} + +// Hash returns the hash code for the Value. +func (num Number) Hash() int { + f, err := json.Number(num).Float64() + if err != nil { + bs := []byte(num) + h := xxhash.Sum64(bs) + return int(h) + } + return int(f) +} + +// Int returns the int representation of num if possible. +func (num Number) Int() (int, bool) { + i64, ok := num.Int64() + return int(i64), ok +} + +// Int64 returns the int64 representation of num if possible. +func (num Number) Int64() (int64, bool) { + i, err := json.Number(num).Int64() + if err != nil { + return 0, false + } + return i, true +} + +// Float64 returns the float64 representation of num if possible. +func (num Number) Float64() (float64, bool) { + f, err := json.Number(num).Float64() + if err != nil { + return 0, false + } + return f, true +} + +// IsGround always returns true. +func (Number) IsGround() bool { + return true +} + +// MarshalJSON returns JSON encoded bytes representing num. 
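// Editor's sketch (not part of the upstream patch): Number keeps the exact
// JSON text and converts on demand via the accessors above. Assumes imports
// of "fmt" and "github.com/open-policy-agent/opa/v1/ast".
func exampleNumber() {
	n := ast.Number("42")

	i, ok := n.Int64()
	fmt.Println(i, ok) // 42 true

	// Compare takes the integer fast path when both sides fit in int64.
	fmt.Println(n.Compare(ast.IntNumberTerm(43).Value)) // -1
}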
+func (num Number) MarshalJSON() ([]byte, error) { + return json.Marshal(json.Number(num)) +} + +func (num Number) String() string { + return string(num) +} + +func intNumber(i int) Number { + return Number(strconv.Itoa(i)) +} + +func int64Number(i int64) Number { + return Number(strconv.FormatInt(i, 10)) +} + +func uint64Number(u uint64) Number { + return Number(strconv.FormatUint(u, 10)) +} + +func floatNumber(f float64) Number { + return Number(strconv.FormatFloat(f, 'g', -1, 64)) +} + +// String represents a string value as defined by JSON. +type String string + +// StringTerm creates a new Term with a String value. +func StringTerm(s string) *Term { + return &Term{Value: String(s)} +} + +// Equal returns true if the other Value is a String and is equal. +func (str String) Equal(other Value) bool { + switch other := other.(type) { + case String: + return str == other + default: + return false + } +} + +// Compare compares str to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. +func (str String) Compare(other Value) int { + // Optimize for the common case of one string being compared to another by + // using a direct comparison of values. This avoids the allocation performed + // when calling Compare and its interface{} argument conversion. + if otherStr, ok := other.(String); ok { + if str == otherStr { + return 0 + } + if str < otherStr { + return -1 + } + return 1 + } + + return Compare(str, other) +} + +// Find returns the current value or a not found error. +func (str String) Find(path Ref) (Value, error) { + if len(path) == 0 { + return str, nil + } + return nil, errFindNotFound +} + +// IsGround always returns true. +func (String) IsGround() bool { + return true +} + +func (str String) String() string { + return strconv.Quote(string(str)) +} + +// Hash returns the hash code for the Value. +func (str String) Hash() int { + return int(xxhash.Sum64String(string(str))) +} + +// Var represents a variable as defined by the language. +type Var string + +// VarTerm creates a new Term with a Variable value. +func VarTerm(v string) *Term { + return &Term{Value: Var(v)} +} + +// Equal returns true if the other Value is a Variable and has the same value +// (name). +func (v Var) Equal(other Value) bool { + switch other := other.(type) { + case Var: + return v == other + default: + return false + } +} + +// Compare compares v to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. +func (v Var) Compare(other Value) int { + if otherVar, ok := other.(Var); ok { + return strings.Compare(string(v), string(otherVar)) + } + return Compare(v, other) +} + +// Find returns the current value or a not found error. +func (v Var) Find(path Ref) (Value, error) { + if len(path) == 0 { + return v, nil + } + return nil, errFindNotFound +} + +// Hash returns the hash code for the Value. +func (v Var) Hash() int { + return int(xxhash.Sum64String(string(v))) +} + +// IsGround always returns false. +func (Var) IsGround() bool { + return false +} + +// IsWildcard returns true if this is a wildcard variable. +func (v Var) IsWildcard() bool { + return strings.HasPrefix(string(v), WildcardPrefix) +} + +// IsGenerated returns true if this variable was generated during compilation. +func (v Var) IsGenerated() bool { + return strings.HasPrefix(string(v), "__local") +} + +func (v Var) String() string { + // Special case for wildcard so that string representation is parseable. 
The + // parser mangles wildcard variables to make their names unique and uses an + // illegal variable name character (WildcardPrefix) to avoid conflicts. When + // we serialize the variable here, we need to make sure it's parseable. + if v.IsWildcard() { + return Wildcard.String() + } + return string(v) +} + +// Ref represents a reference as defined by the language. +type Ref []*Term + +// EmptyRef returns a new, empty reference. +func EmptyRef() Ref { + return Ref([]*Term{}) +} + +// PtrRef returns a new reference against the head for the pointer +// s. Path components in the pointer are unescaped. +func PtrRef(head *Term, s string) (Ref, error) { + s = strings.Trim(s, "/") + if s == "" { + return Ref{head}, nil + } + parts := strings.Split(s, "/") + if maxLen := math.MaxInt32; len(parts) >= maxLen { + return nil, fmt.Errorf("path too long: %s, %d > %d (max)", s, len(parts), maxLen) + } + ref := make(Ref, uint(len(parts))+1) + ref[0] = head + for i := 0; i < len(parts); i++ { + var err error + parts[i], err = url.PathUnescape(parts[i]) + if err != nil { + return nil, err + } + ref[i+1] = StringTerm(parts[i]) + } + return ref, nil +} + +// RefTerm creates a new Term with a Ref value. +func RefTerm(r ...*Term) *Term { + return &Term{Value: Ref(r)} +} + +// Append returns a copy of ref with the term appended to the end. +func (ref Ref) Append(term *Term) Ref { + n := len(ref) + dst := make(Ref, n+1) + copy(dst, ref) + dst[n] = term + return dst +} + +// Insert returns a copy of the ref with x inserted at pos. If pos < len(ref), +// existing elements are shifted to the right. If pos > len(ref)+1 this +// function panics. +func (ref Ref) Insert(x *Term, pos int) Ref { + switch { + case pos == len(ref): + return ref.Append(x) + case pos > len(ref)+1: + panic("illegal index") + } + cpy := make(Ref, len(ref)+1) + copy(cpy, ref[:pos]) + cpy[pos] = x + copy(cpy[pos+1:], ref[pos:]) + return cpy +} + +// Extend returns a copy of ref with the terms from other appended. The head of +// other will be converted to a string. +func (ref Ref) Extend(other Ref) Ref { + dst := make(Ref, len(ref)+len(other)) + copy(dst, ref) + + head := other[0].Copy() + head.Value = String(head.Value.(Var)) + offset := len(ref) + dst[offset] = head + + copy(dst[offset+1:], other[1:]) + return dst +} + +// Concat returns a ref with the terms appended. +func (ref Ref) Concat(terms []*Term) Ref { + if len(terms) == 0 { + return ref + } + cpy := make(Ref, len(ref)+len(terms)) + copy(cpy, ref) + copy(cpy[len(ref):], terms) + return cpy +} + +// Dynamic returns the offset of the first non-constant operand of ref. +func (ref Ref) Dynamic() int { + switch ref[0].Value.(type) { + case Call: + return 0 + } + for i := 1; i < len(ref); i++ { + if !IsConstant(ref[i].Value) { + return i + } + } + return -1 +} + +// Copy returns a deep copy of ref. +func (ref Ref) Copy() Ref { + return termSliceCopy(ref) +} + +// CopyNonGround returns a new ref with deep copies of the non-ground parts and shallow +// copies of the ground parts. This is a *much* cheaper operation than Copy for operations +// that only intend to modify (e.g. plug) the non-ground parts. The head element of the ref +// is always shallow copied. +func (ref Ref) CopyNonGround() Ref { + cpy := make(Ref, len(ref)) + cpy[0] = ref[0] + + for i := 1; i < len(ref); i++ { + if ref[i].Value.IsGround() { + cpy[i] = ref[i] + } else { + cpy[i] = ref[i].Copy() + } + } + + return cpy +} + +// Equal returns true if ref is equal to other. 
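// Editor's sketch (not part of the upstream patch): building refs with PtrRef
// and Append from above. ast.DefaultRootDocument (the `data` root) is assumed
// from elsewhere in this package.
func exampleRefBuild() {
	// "a/b%20c" unescapes to the path data.a["b c"].
	ref, err := ast.PtrRef(ast.DefaultRootDocument, "a/b%20c")
	if err != nil {
		panic(err)
	}

	ref = ref.Append(ast.IntNumberTerm(0)) // Append copies; the receiver is unchanged
	fmt.Println(ref)                       // data.a["b c"][0]
}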
+func (ref Ref) Equal(other Value) bool { + switch o := other.(type) { + case Ref: + if len(ref) == len(o) { + for i := range ref { + if !ref[i].Equal(o[i]) { + return false + } + } + + return true + } + } + + return false +} + +// Compare compares ref to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. +func (ref Ref) Compare(other Value) int { + if o, ok := other.(Ref); ok { + return termSliceCompare(ref, o) + } + + return Compare(ref, other) +} + +// Find returns the current value or a "not found" error. +func (ref Ref) Find(path Ref) (Value, error) { + if len(path) == 0 { + return ref, nil + } + return nil, errFindNotFound +} + +// Hash returns the hash code for the Value. +func (ref Ref) Hash() int { + return termSliceHash(ref) +} + +// HasPrefix returns true if the other ref is a prefix of this ref. +func (ref Ref) HasPrefix(other Ref) bool { + if len(other) > len(ref) { + return false + } + for i := range other { + if !ref[i].Equal(other[i]) { + return false + } + } + return true +} + +// ConstantPrefix returns the constant portion of the ref starting from the head. +func (ref Ref) ConstantPrefix() Ref { + i := ref.Dynamic() + if i < 0 { + return ref.Copy() + } + return ref[:i].Copy() +} + +func (ref Ref) StringPrefix() Ref { + for i := 1; i < len(ref); i++ { + switch ref[i].Value.(type) { + case String: // pass + default: // cut off + return ref[:i].Copy() + } + } + + return ref.Copy() +} + +// GroundPrefix returns the ground portion of the ref starting from the head. By +// definition, the head of the reference is always ground. +func (ref Ref) GroundPrefix() Ref { + if ref.IsGround() { + return ref + } + + prefix := make(Ref, 0, len(ref)) + + for i, x := range ref { + if i > 0 && !x.IsGround() { + break + } + prefix = append(prefix, x) + } + + return prefix +} + +func (ref Ref) DynamicSuffix() Ref { + i := ref.Dynamic() + if i < 0 { + return nil + } + return ref[i:] +} + +// IsGround returns true if all of the parts of the Ref are ground. +func (ref Ref) IsGround() bool { + if len(ref) == 0 { + return true + } + return termSliceIsGround(ref[1:]) +} + +// IsNested returns true if this ref contains other Refs. +func (ref Ref) IsNested() bool { + for _, x := range ref { + if _, ok := x.Value.(Ref); ok { + return true + } + } + return false +} + +// Ptr returns a slash-separated path string for this ref. If the ref +// contains non-string terms this function returns an error. Path +// components are escaped. 
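// Editor's sketch (not part of the upstream patch): the prefix helpers above.
// Assumes imports of "fmt" and "github.com/open-policy-agent/opa/v1/ast",
// plus the package's ast.MustParseRef helper.
func exampleRefPrefixes() {
	ref := ast.MustParseRef("data.a[x].b")

	fmt.Println(ref.IsGround())                            // false: x is a variable
	fmt.Println(ref.GroundPrefix())                        // data.a
	fmt.Println(ref.HasPrefix(ast.MustParseRef("data.a"))) // true
}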
+func (ref Ref) Ptr() (string, error) { + parts := make([]string, 0, len(ref)-1) + for _, term := range ref[1:] { + if str, ok := term.Value.(String); ok { + parts = append(parts, url.PathEscape(string(str))) + } else { + return "", errors.New("invalid path value type") + } + } + return strings.Join(parts, "/"), nil +} + +var varRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$") + +func IsVarCompatibleString(s string) bool { + return varRegexp.MatchString(s) +} + +func (ref Ref) String() string { + if len(ref) == 0 { + return "" + } + + sb := sbPool.Get() + defer sbPool.Put(sb) + + sb.Grow(10 * len(ref)) + + sb.WriteString(ref[0].Value.String()) + + for _, p := range ref[1:] { + switch p := p.Value.(type) { + case String: + str := string(p) + if varRegexp.MatchString(str) && !IsKeyword(str) { + sb.WriteByte('.') + sb.WriteString(str) + } else { + sb.WriteString(`["`) + sb.WriteString(str) + sb.WriteString(`"]`) + } + default: + sb.WriteByte('[') + sb.WriteString(p.String()) + sb.WriteByte(']') + } + } + + return sb.String() +} + +// OutputVars returns a VarSet containing variables that would be bound by evaluating +// this expression in isolation. +func (ref Ref) OutputVars() VarSet { + vis := NewVarVisitor().WithParams(VarVisitorParams{SkipRefHead: true}) + vis.Walk(ref) + return vis.Vars() +} + +func (ref Ref) toArray() *Array { + a := NewArray() + for _, term := range ref { + if _, ok := term.Value.(String); ok { + a = a.Append(term) + } else { + a = a.Append(StringTerm(term.Value.String())) + } + } + return a +} + +// QueryIterator defines the interface for querying AST documents with references. +type QueryIterator func(map[Var]Value, Value) error + +// ArrayTerm creates a new Term with an Array value. +func ArrayTerm(a ...*Term) *Term { + return NewTerm(NewArray(a...)) +} + +// NewArray creates an Array with the terms provided. The array will +// use the provided term slice. +func NewArray(a ...*Term) *Array { + hs := make([]int, len(a)) + for i, e := range a { + hs[i] = e.Value.Hash() + } + arr := &Array{elems: a, hashs: hs, ground: termSliceIsGround(a)} + arr.rehash() + return arr +} + +// Array represents an array as defined by the language. Arrays are similar to the +// same types as defined by JSON with the exception that they can contain Vars +// and References. +type Array struct { + elems []*Term + hashs []int // element hashes + hash int + ground bool +} + +// Copy returns a deep copy of arr. +func (arr *Array) Copy() *Array { + cpy := make([]int, len(arr.elems)) + copy(cpy, arr.hashs) + return &Array{ + elems: termSliceCopy(arr.elems), + hashs: cpy, + hash: arr.hash, + ground: arr.IsGround()} +} + +// Equal returns true if arr is equal to other. +func (arr *Array) Equal(other Value) bool { + if arr == other { + return true + } + + if other, ok := other.(*Array); ok && len(arr.elems) == len(other.elems) { + for i := range arr.elems { + if !arr.elems[i].Equal(other.elems[i]) { + return false + } + } + return true + } + + return false +} + +// Compare compares arr to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. +func (arr *Array) Compare(other Value) int { + if b, ok := other.(*Array); ok { + return termSliceCompare(arr.elems, b.elems) + } + + sortA := sortOrder(arr) + sortB := sortOrder(other) + + if sortA < sortB { + return -1 + } else if sortB < sortA { + return 1 + } + + return Compare(arr, other) +} + +// Find returns the value at the index or an out-of-range error. 
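// Editor's sketch (not part of the upstream patch): Ptr is the inverse of
// PtrRef for string-only refs; components are escaped on the way out. Assumes
// the package's ast.MustParseRef helper.
func exampleRefPtr() {
	p, err := ast.MustParseRef(`data.a["b c"]`).Ptr()
	if err != nil {
		panic(err)
	}
	fmt.Println(p) // a/b%20c
}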
+func (arr *Array) Find(path Ref) (Value, error) { + if len(path) == 0 { + return arr, nil + } + num, ok := path[0].Value.(Number) + if !ok { + return nil, errFindNotFound + } + i, ok := num.Int() + if !ok { + return nil, errFindNotFound + } + if i < 0 || i >= arr.Len() { + return nil, errFindNotFound + } + + term := arr.Elem(i) + // Using Find on scalar values costs an allocation (type -> Value conversion) + // and since we already have the Value here, we can avoid that. + if len(path) == 1 && IsScalar(term.Value) { + return term.Value, nil + } + + return term.Value.Find(path[1:]) +} + +// Get returns the element at pos or nil if not possible. +func (arr *Array) Get(pos *Term) *Term { + num, ok := pos.Value.(Number) + if !ok { + return nil + } + + i, ok := num.Int() + if !ok { + return nil + } + + if i >= 0 && i < len(arr.elems) { + return arr.elems[i] + } + + return nil +} + +// Sorted returns a new Array that contains the sorted elements of arr. +func (arr *Array) Sorted() *Array { + cpy := make([]*Term, len(arr.elems)) + for i := range cpy { + cpy[i] = arr.elems[i] + } + + slices.SortFunc(cpy, TermValueCompare) + + a := NewArray(cpy...) + a.hashs = arr.hashs + return a +} + +// Hash returns the hash code for the Value. +func (arr *Array) Hash() int { + return arr.hash +} + +// IsGround returns true if all of the Array elements are ground. +func (arr *Array) IsGround() bool { + return arr.ground +} + +// MarshalJSON returns JSON encoded bytes representing arr. +func (arr *Array) MarshalJSON() ([]byte, error) { + if len(arr.elems) == 0 { + return []byte(`[]`), nil + } + return json.Marshal(arr.elems) +} + +func (arr *Array) String() string { + sb := sbPool.Get() + sb.Grow(len(arr.elems) * 16) + + defer sbPool.Put(sb) + + sb.WriteByte('[') + for i, e := range arr.elems { + if i > 0 { + sb.WriteString(", ") + } + sb.WriteString(e.String()) + } + sb.WriteByte(']') + + return sb.String() +} + +// Len returns the number of elements in the array. +func (arr *Array) Len() int { + return len(arr.elems) +} + +// Elem returns the element i of arr. +func (arr *Array) Elem(i int) *Term { + return arr.elems[i] +} + +// Set sets the element i of arr. +func (arr *Array) Set(i int, v *Term) { + arr.set(i, v) +} + +// rehash updates the cached hash of arr. +func (arr *Array) rehash() { + arr.hash = 0 + for _, h := range arr.hashs { + arr.hash += h + } +} + +// set sets the element i of arr. +func (arr *Array) set(i int, v *Term) { + arr.ground = arr.ground && v.IsGround() + arr.elems[i] = v + arr.hashs[i] = v.Value.Hash() + arr.rehash() +} + +// Slice returns a slice of arr starting from i index to j. -1 +// indicates the end of the array. The returned value array is not a +// copy and any modifications to either of arrays may be reflected to +// the other. +func (arr *Array) Slice(i, j int) *Array { + var elems []*Term + var hashs []int + if j == -1 { + elems = arr.elems[i:] + hashs = arr.hashs[i:] + } else { + elems = arr.elems[i:j] + hashs = arr.hashs[i:j] + } + // If arr is ground, the slice is, too. + // If it's not, the slice could still be. + gr := arr.ground || termSliceIsGround(elems) + + s := &Array{elems: elems, hashs: hashs, ground: gr} + s.rehash() + return s +} + +// Iter calls f on each element in arr. If f returns an error, +// iteration stops and the return value is the error. +func (arr *Array) Iter(f func(*Term) error) error { + for i := range arr.elems { + if err := f(arr.elems[i]); err != nil { + return err + } + } + return nil +} + +// Until calls f on each element in arr. 
If f returns true, iteration stops. +func (arr *Array) Until(f func(*Term) bool) bool { + for _, term := range arr.elems { + if f(term) { + return true + } + } + return false +} + +// Foreach calls f on each element in arr. +func (arr *Array) Foreach(f func(*Term)) { + for _, term := range arr.elems { + f(term) + } +} + +// Append appends a term to arr, returning the appended array. +func (arr *Array) Append(v *Term) *Array { + cpy := *arr + cpy.elems = append(arr.elems, v) + cpy.hashs = append(arr.hashs, v.Value.Hash()) + cpy.hash = arr.hash + v.Value.Hash() + cpy.ground = arr.ground && v.IsGround() + return &cpy +} + +// Set represents a set as defined by the language. +type Set interface { + Value + Len() int + Copy() Set + Diff(Set) Set + Intersect(Set) Set + Union(Set) Set + Add(*Term) + Iter(func(*Term) error) error + Until(func(*Term) bool) bool + Foreach(func(*Term)) + Contains(*Term) bool + Map(func(*Term) (*Term, error)) (Set, error) + Reduce(*Term, func(*Term, *Term) (*Term, error)) (*Term, error) + Sorted() *Array + Slice() []*Term +} + +// NewSet returns a new Set containing t. +func NewSet(t ...*Term) Set { + s := newset(len(t)) + for _, term := range t { + s.insert(term, false) + } + return s +} + +func newset(n int) *set { + var keys []*Term + if n > 0 { + keys = make([]*Term, 0, n) + } + return &set{ + elems: make(map[int]*Term, n), + keys: keys, + hash: 0, + ground: true, + sortGuard: sync.Once{}, + } +} + +// SetTerm returns a new Term representing a set containing terms t. +func SetTerm(t ...*Term) *Term { + set := NewSet(t...) + return &Term{ + Value: set, + } +} + +type set struct { + elems map[int]*Term + keys []*Term + hash int + ground bool + // Prevents race condition around sorting. + // We can avoid (the allocation cost of) using a pointer here as all + // methods of `set` use a pointer receiver, and the `sync.Once` value + // is never copied. + sortGuard sync.Once +} + +// Copy returns a deep copy of s. +func (s *set) Copy() Set { + terms := make([]*Term, len(s.keys)) + for i := range s.keys { + terms[i] = s.keys[i].Copy() + } + cpy := NewSet(terms...).(*set) + cpy.hash = s.hash + cpy.ground = s.ground + return cpy +} + +// IsGround returns true if all terms in s are ground. +func (s *set) IsGround() bool { + return s.ground +} + +// Hash returns a hash code for s. +func (s *set) Hash() int { + return s.hash +} + +func (s *set) String() string { + if s.Len() == 0 { + return "set()" + } + + sb := sbPool.Get() + sb.Grow(s.Len() * 16) + + defer sbPool.Put(sb) + + sb.WriteByte('{') + for i := range s.sortedKeys() { + if i > 0 { + sb.WriteString(", ") + } + sb.WriteString(s.keys[i].Value.String()) + } + sb.WriteByte('}') + + return sb.String() +} + +func (s *set) sortedKeys() []*Term { + s.sortGuard.Do(func() { + slices.SortFunc(s.keys, TermValueCompare) + }) + return s.keys +} + +// Compare compares s to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. +func (s *set) Compare(other Value) int { + o1 := sortOrder(s) + o2 := sortOrder(other) + if o1 < o2 { + return -1 + } else if o1 > o2 { + return 1 + } + t := other.(*set) + return termSliceCompare(s.sortedKeys(), t.sortedKeys()) +} + +// Find returns the set or dereferences the element itself. +func (s *set) Find(path Ref) (Value, error) { + if len(path) == 0 { + return s, nil + } + if !s.Contains(path[0]) { + return nil, errFindNotFound + } + return path[0].Value.Find(path[1:]) +} + +// Diff returns elements in s that are not in other. 
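// Editor's sketch (not part of the upstream patch): set algebra with the
// methods defined here. Assumes imports of "fmt" and
// "github.com/open-policy-agent/opa/v1/ast".
func exampleSetOps() {
	s1 := ast.NewSet(ast.IntNumberTerm(1), ast.IntNumberTerm(2))
	s2 := ast.NewSet(ast.IntNumberTerm(2), ast.IntNumberTerm(3))

	fmt.Println(s1.Union(s2))                      // {1, 2, 3}
	fmt.Println(s1.Intersect(s2))                  // {2}
	fmt.Println(s1.Diff(s2))                       // {1}
	fmt.Println(s1.Contains(ast.IntNumberTerm(2))) // true
}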
+func (s *set) Diff(other Set) Set {
+	terms := make([]*Term, 0, len(s.keys))
+	for _, term := range s.sortedKeys() {
+		if !other.Contains(term) {
+			terms = append(terms, term)
+		}
+	}
+
+	return NewSet(terms...)
+}
+
+// Intersect returns the set containing elements in both s and other.
+func (s *set) Intersect(other Set) Set {
+	o := other.(*set)
+	n, m := s.Len(), o.Len()
+	ss := s
+	so := o
+	if m < n {
+		ss = o
+		so = s
+		n = m
+	}
+
+	terms := make([]*Term, 0, n)
+	for _, term := range ss.sortedKeys() {
+		if so.Contains(term) {
+			terms = append(terms, term)
+		}
+	}
+
+	return NewSet(terms...)
+}
+
+// Union returns the set containing all elements of s and other.
+func (s *set) Union(other Set) Set {
+	r := NewSet()
+	s.Foreach(r.Add)
+	other.Foreach(r.Add)
+	return r
+}
+
+// Add updates s to include t.
+func (s *set) Add(t *Term) {
+	s.insert(t, true)
+}
+
+// Iter calls f on each element in s. If f returns an error, iteration stops
+// and the return value is the error.
+func (s *set) Iter(f func(*Term) error) error {
+	for _, term := range s.sortedKeys() {
+		if err := f(term); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Until calls f on each element in s. If f returns true, iteration stops.
+func (s *set) Until(f func(*Term) bool) bool {
+	for _, term := range s.sortedKeys() {
+		if f(term) {
+			return true
+		}
+	}
+	return false
+}
+
+// Foreach calls f on each element in s.
+func (s *set) Foreach(f func(*Term)) {
+	for _, term := range s.sortedKeys() {
+		f(term)
+	}
+}
+
+// Map returns a new Set obtained by applying f to each value in s.
+func (s *set) Map(f func(*Term) (*Term, error)) (Set, error) {
+	mapped := make([]*Term, 0, len(s.keys))
+	for _, x := range s.sortedKeys() {
+		term, err := f(x)
+		if err != nil {
+			return nil, err
+		}
+		mapped = append(mapped, term)
+	}
+	return NewSet(mapped...), nil
+}
+
+// Reduce returns a Term produced by applying f to each value in s. The first
+// argument to f is the reduced value (starting with i) and the second argument
+// to f is the element in s.
+func (s *set) Reduce(i *Term, f func(*Term, *Term) (*Term, error)) (*Term, error) {
+	err := s.Iter(func(x *Term) error {
+		var err error
+		i, err = f(i, x)
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+	return i, err
+}
+
+// Contains returns true if t is in s.
+func (s *set) Contains(t *Term) bool {
+	return s.get(t) != nil
+}
+
+// Len returns the number of elements in the set.
+func (s *set) Len() int {
+	return len(s.keys)
+}
+
+// MarshalJSON returns JSON encoded bytes representing s.
+func (s *set) MarshalJSON() ([]byte, error) {
+	if s.keys == nil {
+		return []byte(`[]`), nil
+	}
+	return json.Marshal(s.sortedKeys())
+}
+
+// Sorted returns an Array that contains the sorted elements of s.
+func (s *set) Sorted() *Array {
+	cpy := make([]*Term, len(s.keys))
+	copy(cpy, s.sortedKeys())
+	return NewArray(cpy...)
+}
+
+// Slice returns a slice of terms contained in the set.
+func (s *set) Slice() []*Term {
+	return s.sortedKeys()
+}
+
+// NOTE(philipc): We assume a many-readers, single-writer model here.
+// This method should NOT be used concurrently, or else we risk data races.
+func (s *set) insert(x *Term, resetSortGuard bool) {
+	hash := x.Hash()
+	insertHash := hash
+	// This `equal` utility is duplicated and manually inlined a number of
+	// times in this file. Inlining it avoids heap allocations, so it makes
+	// a big performance difference: some operations like lookup become twice
+	// as slow without it.
+	var equal func(v Value) bool
+
+	switch x := x.Value.(type) {
+	case Null, Boolean, String, Var:
+		equal = func(y Value) bool { return x == y }
+	case Number:
+		if xi, err := json.Number(x).Int64(); err == nil {
+			equal = func(y Value) bool {
+				if y, ok := y.(Number); ok {
+					if yi, err := json.Number(y).Int64(); err == nil {
+						return xi == yi
+					}
+				}
+
+				return false
+			}
+			break
+		}
+
+		// We use big.Rat for comparing big numbers.
+		// It replaces big.Float due to the following reason:
+		// big.Float comes with a default precision of 64, and setting a
+		// larger precision results in more memory being allocated
+		// (regardless of the actual number we are parsing with SetString).
+		//
+		// Note: If we're so close to zero that big.Float says we are zero,
+		// do *not* call (big.Rat).SetString on the original string; it'll
+		// potentially take very long.
+		var a *big.Rat
+		fa, ok := new(big.Float).SetString(string(x))
+		if !ok {
+			panic("illegal value")
+		}
+		if fa.IsInt() {
+			if i, _ := fa.Int64(); i == 0 {
+				a = new(big.Rat).SetInt64(0)
+			}
+		}
+		if a == nil {
+			a, ok = new(big.Rat).SetString(string(x))
+			if !ok {
+				panic("illegal value")
+			}
+		}
+
+		equal = func(b Value) bool {
+			if bNum, ok := b.(Number); ok {
+				var b *big.Rat
+				fb, ok := new(big.Float).SetString(string(bNum))
+				if !ok {
+					panic("illegal value")
+				}
+				if fb.IsInt() {
+					if i, _ := fb.Int64(); i == 0 {
+						b = new(big.Rat).SetInt64(0)
+					}
+				}
+				if b == nil {
+					b, ok = new(big.Rat).SetString(string(bNum))
+					if !ok {
+						panic("illegal value")
+					}
+				}
+
+				return a.Cmp(b) == 0
+			}
+
+			return false
+		}
+	default:
+		equal = func(y Value) bool { return Compare(x, y) == 0 }
+	}
+
+	for curr, ok := s.elems[insertHash]; ok; {
+		if equal(curr.Value) {
+			return
+		}
+
+		insertHash++
+		curr, ok = s.elems[insertHash]
+	}
+
+	s.elems[insertHash] = x
+	// O(1) insertion, but we'll have to re-sort the keys later.
+	s.keys = append(s.keys, x)
+
+	if resetSortGuard {
+		// Reset the sync.Once instance.
+		// See https://github.com/golang/go/issues/25955 for why we do it this way.
+		// Note that this will always be the case when external code calls insert via
+		// Add, or otherwise. Internal code may however benefit from not having to
+		// re-create this pointer when it's known not to be needed.
+		s.sortGuard = sync.Once{}
+	}
+
+	s.hash += hash
+	s.ground = s.ground && x.IsGround()
+}
+
+func (s *set) get(x *Term) *Term {
+	hash := x.Hash()
+	// This `equal` utility is duplicated and manually inlined a number of
+	// times in this file. Inlining it avoids heap allocations, so it makes
+	// a big performance difference: some operations like lookup become twice
+	// as slow without it.
+	var equal func(v Value) bool
+
+	switch x := x.Value.(type) {
+	case Null, Boolean, String, Var:
+		equal = func(y Value) bool { return x == y }
+	case Number:
+		if xi, err := json.Number(x).Int64(); err == nil {
+			equal = func(y Value) bool {
+				if y, ok := y.(Number); ok {
+					if yi, err := json.Number(y).Int64(); err == nil {
+						return xi == yi
+					}
+				}
+
+				return false
+			}
+			break
+		}
+
+		// We use big.Rat for comparing big numbers.
+		// It replaces big.Float due to the following reason:
+		// big.Float comes with a default precision of 64, and setting a
+		// larger precision results in more memory being allocated
+		// (regardless of the actual number we are parsing with SetString).
+		//
+		// Note: If we're so close to zero that big.Float says we are zero,
+		// do *not* call (big.Rat).SetString on the original string; it'll
+		// potentially take very long.
+ var a *big.Rat + fa, ok := new(big.Float).SetString(string(x)) + if !ok { + panic("illegal value") + } + if fa.IsInt() { + if i, _ := fa.Int64(); i == 0 { + a = new(big.Rat).SetInt64(0) + } + } + if a == nil { + a, ok = new(big.Rat).SetString(string(x)) + if !ok { + panic("illegal value") + } + } + + equal = func(b Value) bool { + if bNum, ok := b.(Number); ok { + var b *big.Rat + fb, ok := new(big.Float).SetString(string(bNum)) + if !ok { + panic("illegal value") + } + if fb.IsInt() { + if i, _ := fb.Int64(); i == 0 { + b = new(big.Rat).SetInt64(0) + } + } + if b == nil { + b, ok = new(big.Rat).SetString(string(bNum)) + if !ok { + panic("illegal value") + } + } + + return a.Cmp(b) == 0 + } + return false + + } + + default: + equal = func(y Value) bool { return Compare(x, y) == 0 } + } + + for curr, ok := s.elems[hash]; ok; { + if equal(curr.Value) { + return curr + } + + hash++ + curr, ok = s.elems[hash] + } + return nil +} + +// Object represents an object as defined by the language. +type Object interface { + Value + Len() int + Get(*Term) *Term + Copy() Object + Insert(*Term, *Term) + Iter(func(*Term, *Term) error) error + Until(func(*Term, *Term) bool) bool + Foreach(func(*Term, *Term)) + Map(func(*Term, *Term) (*Term, *Term, error)) (Object, error) + Diff(other Object) Object + Intersect(other Object) [][3]*Term + Merge(other Object) (Object, bool) + MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool) + Filter(filter Object) (Object, error) + Keys() []*Term + KeysIterator() ObjectKeysIterator + get(k *Term) *objectElem // To prevent external implementations +} + +// NewObject creates a new Object with t. +func NewObject(t ...[2]*Term) Object { + obj := newobject(len(t)) + for i := range t { + obj.insert(t[i][0], t[i][1], false) + } + return obj +} + +// ObjectTerm creates a new Term with an Object value. +func ObjectTerm(o ...[2]*Term) *Term { + return &Term{Value: NewObject(o...)} +} + +func LazyObject(blob map[string]interface{}) Object { + return &lazyObj{native: blob, cache: map[string]Value{}} +} + +type lazyObj struct { + strict Object + cache map[string]Value + native map[string]interface{} +} + +func (l *lazyObj) force() Object { + if l.strict == nil { + l.strict = MustInterfaceToValue(l.native).(Object) + // NOTE(jf): a possible performance improvement here would be to check how many + // entries have been realized to AST in the cache, and if some threshold compared to the + // total number of keys is exceeded, realize the remaining entries and set l.strict to l.cache. + l.cache = map[string]Value{} // We don't need the cache anymore; drop it to free up memory. + } + return l.strict +} + +func (l *lazyObj) Compare(other Value) int { + o1 := sortOrder(l) + o2 := sortOrder(other) + if o1 < o2 { + return -1 + } else if o2 < o1 { + return 1 + } + return l.force().Compare(other) +} + +func (l *lazyObj) Copy() Object { + return l +} + +func (l *lazyObj) Diff(other Object) Object { + return l.force().Diff(other) +} + +func (l *lazyObj) Intersect(other Object) [][3]*Term { + return l.force().Intersect(other) +} + +func (l *lazyObj) Iter(f func(*Term, *Term) error) error { + return l.force().Iter(f) +} + +func (l *lazyObj) Until(f func(*Term, *Term) bool) bool { + // NOTE(sr): there could be benefits in not forcing here -- if we abort because + // `f` returns true, we could save us from converting the rest of the object. 
+ return l.force().Until(f) +} + +func (l *lazyObj) Foreach(f func(*Term, *Term)) { + l.force().Foreach(f) +} + +func (l *lazyObj) Filter(filter Object) (Object, error) { + return l.force().Filter(filter) +} + +func (l *lazyObj) Map(f func(*Term, *Term) (*Term, *Term, error)) (Object, error) { + return l.force().Map(f) +} + +func (l *lazyObj) MarshalJSON() ([]byte, error) { + return l.force().(*object).MarshalJSON() +} + +func (l *lazyObj) Merge(other Object) (Object, bool) { + return l.force().Merge(other) +} + +func (l *lazyObj) MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool) { + return l.force().MergeWith(other, conflictResolver) +} + +func (l *lazyObj) Len() int { + return len(l.native) +} + +func (l *lazyObj) String() string { + return l.force().String() +} + +// get is merely there to implement the Object interface -- `get` there serves the +// purpose of prohibiting external implementations. It's never called for lazyObj. +func (*lazyObj) get(*Term) *objectElem { + return nil +} + +func (l *lazyObj) Get(k *Term) *Term { + if l.strict != nil { + return l.strict.Get(k) + } + if s, ok := k.Value.(String); ok { + if v, ok := l.cache[string(s)]; ok { + return NewTerm(v) + } + + if val, ok := l.native[string(s)]; ok { + var converted Value + switch val := val.(type) { + case map[string]interface{}: + converted = LazyObject(val) + default: + converted = MustInterfaceToValue(val) + } + l.cache[string(s)] = converted + return NewTerm(converted) + } + } + return nil +} + +func (l *lazyObj) Insert(k, v *Term) { + l.force().Insert(k, v) +} + +func (*lazyObj) IsGround() bool { + return true +} + +func (l *lazyObj) Hash() int { + return l.force().Hash() +} + +func (l *lazyObj) Keys() []*Term { + if l.strict != nil { + return l.strict.Keys() + } + ret := make([]*Term, 0, len(l.native)) + for k := range l.native { + ret = append(ret, StringTerm(k)) + } + slices.SortFunc(ret, TermValueCompare) + + return ret +} + +func (l *lazyObj) KeysIterator() ObjectKeysIterator { + return &lazyObjKeysIterator{keys: l.Keys()} +} + +type lazyObjKeysIterator struct { + current int + keys []*Term +} + +func (ki *lazyObjKeysIterator) Next() (*Term, bool) { + if ki.current == len(ki.keys) { + return nil, false + } + ki.current++ + return ki.keys[ki.current-1], true +} + +func (l *lazyObj) Find(path Ref) (Value, error) { + if l.strict != nil { + return l.strict.Find(path) + } + if len(path) == 0 { + return l, nil + } + if p0, ok := path[0].Value.(String); ok { + if v, ok := l.cache[string(p0)]; ok { + return v.Find(path[1:]) + } + + if v, ok := l.native[string(p0)]; ok { + var converted Value + switch v := v.(type) { + case map[string]interface{}: + converted = LazyObject(v) + default: + converted = MustInterfaceToValue(v) + } + l.cache[string(p0)] = converted + return converted.Find(path[1:]) + } + } + return nil, errFindNotFound +} + +type object struct { + elems map[int]*objectElem + keys objectElemSlice + ground int // number of key and value grounds. Counting is + // required to support insert's key-value replace. + hash int + sortGuard sync.Once // Prevents race condition around sorting. 
+}
+
+func newobject(n int) *object {
+	var keys objectElemSlice
+	if n > 0 {
+		keys = make(objectElemSlice, 0, n)
+	}
+	return &object{
+		elems:     make(map[int]*objectElem, n),
+		keys:      keys,
+		ground:    0,
+		hash:      0,
+		sortGuard: sync.Once{},
+	}
+}
+
+type objectElem struct {
+	key   *Term
+	value *Term
+	next  *objectElem
+}
+
+type objectElemSlice []*objectElem
+
+func (s objectElemSlice) Less(i, j int) bool { return Compare(s[i].key.Value, s[j].key.Value) < 0 }
+func (s objectElemSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s objectElemSlice) Len() int           { return len(s) }
+
+// Item is a helper for constructing a tuple containing two Terms
+// representing a key/value pair in an Object.
+func Item(key, value *Term) [2]*Term {
+	return [2]*Term{key, value}
+}
+
+func (obj *object) sortedKeys() objectElemSlice {
+	obj.sortGuard.Do(func() {
+		slices.SortFunc(obj.keys, func(a, b *objectElem) int {
+			return a.key.Value.Compare(b.key.Value)
+		})
+	})
+	return obj.keys
+}
+
+// Compare compares obj to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (obj *object) Compare(other Value) int {
+	if x, ok := other.(*lazyObj); ok {
+		other = x.force()
+	}
+	o1 := sortOrder(obj)
+	o2 := sortOrder(other)
+	if o1 < o2 {
+		return -1
+	} else if o2 < o1 {
+		return 1
+	}
+	a := obj
+	b := other.(*object)
+	// Ensure that keys are in canonical sorted order before use!
+	akeys := a.sortedKeys()
+	bkeys := b.sortedKeys()
+	minLen := len(akeys)
+	if len(b.keys) < len(akeys) {
+		minLen = len(bkeys)
+	}
+	for i := range minLen {
+		keysCmp := Compare(akeys[i].key, bkeys[i].key)
+		if keysCmp < 0 {
+			return -1
+		}
+		if keysCmp > 0 {
+			return 1
+		}
+		valA := akeys[i].value
+		valB := bkeys[i].value
+		valCmp := Compare(valA, valB)
+		if valCmp != 0 {
+			return valCmp
+		}
+	}
+	if len(akeys) < len(bkeys) {
+		return -1
+	}
+	if len(bkeys) < len(akeys) {
+		return 1
+	}
+	return 0
+}
+
+// Find returns the value at the key or undefined.
+func (obj *object) Find(path Ref) (Value, error) {
+	if len(path) == 0 {
+		return obj, nil
+	}
+	term := obj.Get(path[0])
+	if term == nil {
+		return nil, errFindNotFound
+	}
+	// Using Find on scalar values costs an allocation (type -> Value conversion)
+	// and since we already have the Value here, we can avoid that.
+	if len(path) == 1 && IsScalar(term.Value) {
+		return term.Value, nil
+	}
+
+	return term.Value.Find(path[1:])
+}
+
+func (obj *object) Insert(k, v *Term) {
+	obj.insert(k, v, true)
+}
+
+// Get returns the value of k in obj if k exists, otherwise nil.
+func (obj *object) Get(k *Term) *Term {
+	if elem := obj.get(k); elem != nil {
+		return elem.value
+	}
+	return nil
+}
+
+// Hash returns the hash code for the Value.
+func (obj *object) Hash() int {
+	return obj.hash
+}
+
+// IsGround returns true if all of the Object key/value pairs are ground.
+func (obj *object) IsGround() bool {
+	return obj.ground == 2*len(obj.keys)
+}
+
+// Copy returns a deep copy of obj.
+func (obj *object) Copy() Object {
+	cpy, _ := obj.Map(func(k, v *Term) (*Term, *Term, error) {
+		return k.Copy(), v.Copy(), nil
+	})
+	cpy.(*object).hash = obj.hash
+	return cpy
+}
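// Editor's sketch (not part of the upstream patch): constructing and merging
// objects with Item, NewObject, and Merge (defined below). Assumes imports of
// "fmt" and "github.com/open-policy-agent/opa/v1/ast".
func exampleObjectOps() {
	obj := ast.NewObject(ast.Item(ast.StringTerm("a"), ast.IntNumberTerm(1)))
	obj.Insert(ast.StringTerm("b"), ast.IntNumberTerm(2))

	fmt.Println(obj.Get(ast.StringTerm("a"))) // 1
	fmt.Println(obj.Len())                    // 2

	// Merge unions the keys; overlapping object values merge recursively.
	merged, ok := obj.Merge(ast.NewObject(ast.Item(ast.StringTerm("c"), ast.IntNumberTerm(3))))
	fmt.Println(ok, merged) // true {"a": 1, "b": 2, "c": 3}
}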
+// Diff returns a new Object that contains only the key/value pairs from obj
+// whose keys are not present in other.
+func (obj *object) Diff(other Object) Object {
+	r := newobject(obj.Len())
+	for _, node := range obj.sortedKeys() {
+		if other.Get(node.key) == nil {
+			r.insert(node.key, node.value, false)
+		}
+	}
+	return r
+}
+
+// Intersect returns a slice of term triplets that represent the intersection of keys
+// between obj and other. For each intersecting key, the values from obj and other are included
+// as the last two terms in the triplet (respectively).
+func (obj *object) Intersect(other Object) [][3]*Term {
+	r := [][3]*Term{}
+	obj.Foreach(func(k, v *Term) {
+		if v2 := other.Get(k); v2 != nil {
+			r = append(r, [3]*Term{k, v, v2})
+		}
+	})
+	return r
+}
+
+// Iter calls the function f for each key-value pair in the object. If f
+// returns an error, iteration stops and the error is returned.
+func (obj *object) Iter(f func(*Term, *Term) error) error {
+	for _, node := range obj.sortedKeys() {
+		if err := f(node.key, node.value); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Until calls f for each key-value pair in the object. If f returns
+// true, iteration stops and Until returns true. Otherwise, it returns
+// false.
+func (obj *object) Until(f func(*Term, *Term) bool) bool {
+	for _, node := range obj.sortedKeys() {
+		if f(node.key, node.value) {
+			return true
+		}
+	}
+	return false
+}
+
+// Foreach calls f for each key-value pair in the object.
+func (obj *object) Foreach(f func(*Term, *Term)) {
+	for _, node := range obj.sortedKeys() {
+		f(node.key, node.value)
+	}
+}
+
+// Map returns a new Object constructed by mapping each element in the object
+// using the function f. If f returns an error, the error is returned by Map.
+// If f returns a nil key, the element is skipped.
+func (obj *object) Map(f func(*Term, *Term) (*Term, *Term, error)) (Object, error) {
+	cpy := newobject(obj.Len())
+	for _, node := range obj.sortedKeys() {
+		k, v, err := f(node.key, node.value)
+		if err != nil {
+			return nil, err
+		}
+		if k != nil {
+			cpy.insert(k, v, false)
+		}
+	}
+	return cpy, nil
+}
+
+// Keys returns the keys of obj.
+func (obj *object) Keys() []*Term {
+	keys := make([]*Term, len(obj.keys))
+
+	for i, elem := range obj.sortedKeys() {
+		keys[i] = elem.key
+	}
+
+	return keys
+}
+
+// KeysIterator returns an iterator over obj's keys.
+func (obj *object) KeysIterator() ObjectKeysIterator {
+	return newobjectKeysIterator(obj)
+}
+
+// MarshalJSON returns JSON encoded bytes representing obj.
+func (obj *object) MarshalJSON() ([]byte, error) {
+	sl := make([][2]*Term, obj.Len())
+	for i, node := range obj.sortedKeys() {
+		sl[i] = Item(node.key, node.value)
+	}
+	return json.Marshal(sl)
+}
+
+// Merge returns a new Object containing the non-overlapping keys of obj and other. If there are
+// overlapping keys between obj and other, the values associated with the keys are merged. Only
+// objects can be merged with other objects. If the values cannot be merged, the second return value
+// will be false.
+func (obj *object) Merge(other Object) (Object, bool) {
+	return obj.MergeWith(other, func(v1, v2 *Term) (*Term, bool) {
+		obj1, ok1 := v1.Value.(Object)
+		obj2, ok2 := v2.Value.(Object)
+		if !ok1 || !ok2 {
+			return nil, true
+		}
+		obj3, ok := obj1.Merge(obj2)
+		if !ok {
+			return nil, true
+		}
+		return NewTerm(obj3), false
+	})
+}
+
+// MergeWith returns a new Object containing the merged keys of obj and other.
+// If there are overlapping keys between obj and other, the conflictResolver
+// is called.
The conflictResolver can return a merged value and a boolean
+// indicating if the merge has failed and should stop.
+func (obj *object) MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool) {
+	result := NewObject()
+	stop := obj.Until(func(k, v *Term) bool {
+		v2 := other.Get(k)
+		// The key didn't exist in other, keep the original value
+		if v2 == nil {
+			result.Insert(k, v)
+			return false
+		}
+
+		// The key exists in both, resolve the conflict if possible
+		merged, stop := conflictResolver(v, v2)
+		if !stop {
+			result.Insert(k, merged)
+		}
+		return stop
+	})
+
+	if stop {
+		return nil, false
+	}
+
+	// Copy in any values from other for keys that don't exist in obj
+	other.Foreach(func(k, v *Term) {
+		if v2 := obj.Get(k); v2 == nil {
+			result.Insert(k, v)
+		}
+	})
+	return result, true
+}
+
+// Filter returns a new object from values in obj where the keys are
+// found in filter. Array indices for values can be specified as
+// number strings.
+func (obj *object) Filter(filter Object) (Object, error) {
+	filtered, err := filterObject(obj, filter)
+	if err != nil {
+		return nil, err
+	}
+	return filtered.(Object), nil
+}
+
+// Len returns the number of elements in the object.
+func (obj *object) Len() int {
+	return len(obj.keys)
+}
+
+func (obj *object) String() string {
+	sb := sbPool.Get()
+	sb.Grow(obj.Len() * 32)
+
+	defer sbPool.Put(sb)
+
+	sb.WriteByte('{')
+
+	for i, elem := range obj.sortedKeys() {
+		if i > 0 {
+			sb.WriteString(", ")
+		}
+		sb.WriteString(elem.key.String())
+		sb.WriteString(": ")
+		sb.WriteString(elem.value.String())
+	}
+	sb.WriteByte('}')
+
+	return sb.String()
+}
+
+func (obj *object) get(k *Term) *objectElem {
+	hash := k.Hash()
+
+	// This `equal` utility is duplicated and manually inlined a number of
+	// times in this file. Inlining it avoids heap allocations, so it makes
+	// a big performance difference: some operations like lookup become twice
+	// as slow without it.
+	var equal func(v Value) bool
+
+	switch x := k.Value.(type) {
+	case Null, Boolean, String, Var:
+		equal = func(y Value) bool { return x == y }
+	case Number:
+		if xi, ok := x.Int64(); ok {
+			equal = func(y Value) bool {
+				if y, ok := y.(Number); ok {
+					if yi, ok := y.Int64(); ok {
+						return xi == yi
+					}
+				}
+
+				return false
+			}
+			break
+		}
+
+		// We use big.Rat for comparing big numbers.
+		// It replaces big.Float due to the following reason:
+		// big.Float comes with a default precision of 64, and setting a
+		// larger precision results in more memory being allocated
+		// (regardless of the actual number we are parsing with SetString).
+		//
+		// Note: If we're so close to zero that big.Float says we are zero, do
+		// *not* call (*big.Rat).SetString on the original string; it'll potentially
+		// take very long.
+		var a *big.Rat
+		fa, ok := new(big.Float).SetString(string(x))
+		if !ok {
+			panic("illegal value")
+		}
+		if fa.IsInt() {
+			if i, _ := fa.Int64(); i == 0 {
+				a = new(big.Rat).SetInt64(0)
+			}
+		}
+		if a == nil {
+			a, ok = new(big.Rat).SetString(string(x))
+			if !ok {
+				panic("illegal value")
+			}
+		}
+
+		equal = func(b Value) bool {
+			if bNum, ok := b.(Number); ok {
+				var b *big.Rat
+				fb, ok := new(big.Float).SetString(string(bNum))
+				if !ok {
+					panic("illegal value")
+				}
+				if fb.IsInt() {
+					if i, _ := fb.Int64(); i == 0 {
+						b = new(big.Rat).SetInt64(0)
+					}
+				}
+				if b == nil {
+					b, ok = new(big.Rat).SetString(string(bNum))
+					if !ok {
+						panic("illegal value")
+					}
+				}
+
+				return a.Cmp(b) == 0
+			}
+
+			return false
+		}
+	default:
+		equal = func(y Value) bool { return Compare(x, y) == 0 }
+	}
+
+	for curr := obj.elems[hash]; curr != nil; curr = curr.next {
+		if equal(curr.key.Value) {
+			return curr
+		}
+	}
+	return nil
+}
+
+// NOTE(philipc): We assume a many-readers, single-writer model here.
+// This method should NOT be used concurrently, or else we risk data races.
+func (obj *object) insert(k, v *Term, resetSortGuard bool) {
+	hash := k.Hash()
+	head := obj.elems[hash]
+	// This `equal` utility is duplicated and manually inlined a number of
+	// times in this file. Inlining it avoids heap allocations, so it makes
+	// a big performance difference: some operations like lookup become twice
+	// as slow without it.
+	var equal func(v Value) bool
+
+	switch x := k.Value.(type) {
+	case Null, Boolean, String, Var:
+		equal = func(y Value) bool { return x == y }
+	case Number:
+		if xi, err := json.Number(x).Int64(); err == nil {
+			equal = func(y Value) bool {
+				if y, ok := y.(Number); ok {
+					if yi, err := json.Number(y).Int64(); err == nil {
+						return xi == yi
+					}
+				}
+
+				return false
+			}
+			break
+		}
+
+		// We use big.Rat for comparing big numbers.
+		// It replaces big.Float due to the following reason:
+		// big.Float comes with a default precision of 64, and setting a
+		// larger precision results in more memory being allocated
+		// (regardless of the actual number we are parsing with SetString).
+		//
+		// Note: If we're so close to zero that big.Float says we are zero, do
+		// *not* call (*big.Rat).SetString on the original string; it'll potentially
+		// take very long.
+		var a *big.Rat
+		fa, ok := new(big.Float).SetString(string(x))
+		if !ok {
+			panic("illegal value")
+		}
+		if fa.IsInt() {
+			if i, _ := fa.Int64(); i == 0 {
+				a = new(big.Rat).SetInt64(0)
+			}
+		}
+		if a == nil {
+			a, ok = new(big.Rat).SetString(string(x))
+			if !ok {
+				panic("illegal value")
+			}
+		}
+
+		equal = func(b Value) bool {
+			if bNum, ok := b.(Number); ok {
+				var b *big.Rat
+				fb, ok := new(big.Float).SetString(string(bNum))
+				if !ok {
+					panic("illegal value")
+				}
+				if fb.IsInt() {
+					if i, _ := fb.Int64(); i == 0 {
+						b = new(big.Rat).SetInt64(0)
+					}
+				}
+				if b == nil {
+					b, ok = new(big.Rat).SetString(string(bNum))
+					if !ok {
+						panic("illegal value")
+					}
+				}
+
+				return a.Cmp(b) == 0
+			}
+
+			return false
+		}
+	default:
+		equal = func(y Value) bool { return Compare(x, y) == 0 }
+	}
+
+	for curr := head; curr != nil; curr = curr.next {
+		if equal(curr.key.Value) {
+			// The ground bit of the value may change in
+			// replace, hence adjust the counter per old
+			// and new value.
+
+			if curr.value.IsGround() {
+				obj.ground--
+			}
+			if v.IsGround() {
+				obj.ground++
+			}
+
+			curr.value = v
+
+			obj.rehash()
+			return
+		}
+	}
+	elem := &objectElem{
+		key:   k,
+		value: v,
+		next:  head,
+	}
+	obj.elems[hash] = elem
+	// O(1) insertion, but we'll have to re-sort the keys later.
+	obj.keys = append(obj.keys, elem)
+
+	if resetSortGuard {
+		// Reset the sync.Once instance.
+		// See https://github.com/golang/go/issues/25955 for why we do it this way.
+		// Note that this will always be the case when external code calls insert via
+		// Add, or otherwise. Internal code may however benefit from not having to
+		// re-create this when it's known not to be needed.
+		obj.sortGuard = sync.Once{}
+	}
+
+	obj.hash += hash + v.Hash()
+
+	if k.IsGround() {
+		obj.ground++
+	}
+	if v.IsGround() {
+		obj.ground++
+	}
+}
+
+func (obj *object) rehash() {
+	// obj.keys is considered truth, from which obj.hash and obj.elems are recalculated.
+
+	obj.hash = 0
+	obj.elems = make(map[int]*objectElem, len(obj.keys))
+
+	for _, elem := range obj.keys {
+		hash := elem.key.Hash()
+		obj.hash += hash + elem.value.Hash()
+		obj.elems[hash] = elem
+	}
+}
+
+func filterObject(o Value, filter Value) (Value, error) {
+	if (Null{}).Equal(filter) {
+		return o, nil
+	}
+
+	filteredObj, ok := filter.(*object)
+	if !ok {
+		return nil, fmt.Errorf("invalid filter value %q, expected an object", filter)
+	}
+
+	switch v := o.(type) {
+	case String, Number, Boolean, Null:
+		return o, nil
+	case *Array:
+		values := NewArray()
+		for i := range v.Len() {
+			subFilter := filteredObj.Get(StringTerm(strconv.Itoa(i)))
+			if subFilter != nil {
+				filteredValue, err := filterObject(v.Elem(i).Value, subFilter.Value)
+				if err != nil {
+					return nil, err
+				}
+				values = values.Append(NewTerm(filteredValue))
+			}
+		}
+		return values, nil
+	case Set:
+		terms := make([]*Term, 0, v.Len())
+		for _, t := range v.Slice() {
+			if filteredObj.Get(t) != nil {
+				filteredValue, err := filterObject(t.Value, filteredObj.Get(t).Value)
+				if err != nil {
+					return nil, err
+				}
+				terms = append(terms, NewTerm(filteredValue))
+			}
+		}
+		return NewSet(terms...), nil
+	case *object:
+		values := NewObject()
+
+		iterObj := v
+		other := filteredObj
+		if v.Len() < filteredObj.Len() {
+			iterObj = filteredObj
+			other = v
+		}
+
+		err := iterObj.Iter(func(key *Term, _ *Term) error {
+			if other.Get(key) != nil {
+				filteredValue, err := filterObject(v.Get(key).Value, filteredObj.Get(key).Value)
+				if err != nil {
+					return err
+				}
+				values.Insert(key, NewTerm(filteredValue))
+			}
+			return nil
+		})
+		return values, err
+	default:
+		return nil, fmt.Errorf("invalid object value type %q", v)
+	}
+}
+
+// NOTE(philipc): The only way to get an ObjectKeysIterator should be
+// from an Object. This ensures that the iterator can have implementation-
+// specific details internally, with no contracts except to the very
+// limited interface.
+type ObjectKeysIterator interface {
+	Next() (*Term, bool)
+}
+
+type objectKeysIterator struct {
+	obj     *object
+	numKeys int
+	index   int
+}
+
+func newobjectKeysIterator(o *object) ObjectKeysIterator {
+	return &objectKeysIterator{
+		obj:     o,
+		numKeys: o.Len(),
+		index:   0,
+	}
+}
+
+func (oki *objectKeysIterator) Next() (*Term, bool) {
+	if oki.index == oki.numKeys || oki.numKeys == 0 {
+		return nil, false
+	}
+	oki.index++
+	return oki.obj.sortedKeys()[oki.index-1].key, true
+}
+
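Before the comprehension types that follow, a small sketch of how they surface when parsing policy text; again not part of the patch, assuming the vendored v1 `ast` package (`MustParseTerm` panics on malformed input):

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	t := ast.MustParseTerm(`[x | some x in [1, 2, 3]]`)
	ac := t.Value.(*ast.ArrayComprehension)
	fmt.Println(ac.Term) // the head term: x
	fmt.Println(ac.Body) // the body: some x in [1, 2, 3]
}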
+// ArrayComprehension represents an array comprehension as defined in the language.
+type ArrayComprehension struct {
+	Term *Term `json:"term"`
+	Body Body  `json:"body"`
+}
+
+// ArrayComprehensionTerm creates a new Term with an ArrayComprehension value.
+func ArrayComprehensionTerm(term *Term, body Body) *Term {
+	return &Term{
+		Value: &ArrayComprehension{
+			Term: term,
+			Body: body,
+		},
+	}
+}
+
+// Copy returns a deep copy of ac.
+func (ac *ArrayComprehension) Copy() *ArrayComprehension {
+	cpy := *ac
+	cpy.Body = ac.Body.Copy()
+	cpy.Term = ac.Term.Copy()
+	return &cpy
+}
+
+// Equal returns true if ac is equal to other.
+func (ac *ArrayComprehension) Equal(other Value) bool {
+	return Compare(ac, other) == 0
+}
+
+// Compare compares ac to other, returning <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (ac *ArrayComprehension) Compare(other Value) int {
+	return Compare(ac, other)
+}
+
+// Find returns the current value or a not found error.
+func (ac *ArrayComprehension) Find(path Ref) (Value, error) {
+	if len(path) == 0 {
+		return ac, nil
+	}
+	return nil, errFindNotFound
+}
+
+// Hash returns the hash code of the Value.
+func (ac *ArrayComprehension) Hash() int {
+	return ac.Term.Hash() + ac.Body.Hash()
+}
+
+// IsGround returns true if the Term and Body are ground.
+func (ac *ArrayComprehension) IsGround() bool {
+	return ac.Term.IsGround() && ac.Body.IsGround()
+}
+
+func (ac *ArrayComprehension) String() string {
+	return "[" + ac.Term.String() + " | " + ac.Body.String() + "]"
+}
+
+// ObjectComprehension represents an object comprehension as defined in the language.
+type ObjectComprehension struct {
+	Key   *Term `json:"key"`
+	Value *Term `json:"value"`
+	Body  Body  `json:"body"`
+}
+
+// ObjectComprehensionTerm creates a new Term with an ObjectComprehension value.
+func ObjectComprehensionTerm(key, value *Term, body Body) *Term {
+	return &Term{
+		Value: &ObjectComprehension{
+			Key:   key,
+			Value: value,
+			Body:  body,
+		},
+	}
+}
+
+// Copy returns a deep copy of oc.
+func (oc *ObjectComprehension) Copy() *ObjectComprehension {
+	cpy := *oc
+	cpy.Body = oc.Body.Copy()
+	cpy.Key = oc.Key.Copy()
+	cpy.Value = oc.Value.Copy()
+	return &cpy
+}
+
+// Equal returns true if oc is equal to other.
+func (oc *ObjectComprehension) Equal(other Value) bool {
+	return Compare(oc, other) == 0
+}
+
+// Compare compares oc to other, returning <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (oc *ObjectComprehension) Compare(other Value) int {
+	return Compare(oc, other)
+}
+
+// Find returns the current value or a not found error.
+func (oc *ObjectComprehension) Find(path Ref) (Value, error) {
+	if len(path) == 0 {
+		return oc, nil
+	}
+	return nil, errFindNotFound
+}
+
+// Hash returns the hash code of the Value.
+func (oc *ObjectComprehension) Hash() int {
+	return oc.Key.Hash() + oc.Value.Hash() + oc.Body.Hash()
+}
+
+// IsGround returns true if the Key, Value and Body are ground.
+func (oc *ObjectComprehension) IsGround() bool {
+	return oc.Key.IsGround() && oc.Value.IsGround() && oc.Body.IsGround()
+}
+
+func (oc *ObjectComprehension) String() string {
+	return "{" + oc.Key.String() + ": " + oc.Value.String() + " | " + oc.Body.String() + "}"
+}
+
+// SetComprehension represents a set comprehension as defined in the language.
+type SetComprehension struct {
+	Term *Term `json:"term"`
+	Body Body  `json:"body"`
+}
+
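The same structures can also be built programmatically; a sketch using the constructor that follows, with made-up terms and the vendored v1 `ast` package assumed:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	body := ast.MustParseBody(`some x in ["a", "b"]`)
	sc := ast.SetComprehensionTerm(ast.VarTerm("x"), body)
	fmt.Println(sc) // prints something like {x | some x in ["a", "b"]}
}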
+// SetComprehensionTerm creates a new Term with a SetComprehension value.
+func SetComprehensionTerm(term *Term, body Body) *Term {
+	return &Term{
+		Value: &SetComprehension{
+			Term: term,
+			Body: body,
+		},
+	}
+}
+
+// Copy returns a deep copy of sc.
+func (sc *SetComprehension) Copy() *SetComprehension {
+	cpy := *sc
+	cpy.Body = sc.Body.Copy()
+	cpy.Term = sc.Term.Copy()
+	return &cpy
+}
+
+// Equal returns true if sc is equal to other.
+func (sc *SetComprehension) Equal(other Value) bool {
+	return Compare(sc, other) == 0
+}
+
+// Compare compares sc to other, returning <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (sc *SetComprehension) Compare(other Value) int {
+	return Compare(sc, other)
+}
+
+// Find returns the current value or a not found error.
+func (sc *SetComprehension) Find(path Ref) (Value, error) {
+	if len(path) == 0 {
+		return sc, nil
+	}
+	return nil, errFindNotFound
+}
+
+// Hash returns the hash code of the Value.
+func (sc *SetComprehension) Hash() int {
+	return sc.Term.Hash() + sc.Body.Hash()
+}
+
+// IsGround returns true if the Term and Body are ground.
+func (sc *SetComprehension) IsGround() bool {
+	return sc.Term.IsGround() && sc.Body.IsGround()
+}
+
+func (sc *SetComprehension) String() string {
+	return "{" + sc.Term.String() + " | " + sc.Body.String() + "}"
+}
+
+// Call represents a function call in the language.
+type Call []*Term
+
+// CallTerm returns a new Term with a Call value defined by terms. The first
+// term is the operator and the rest are operands.
+func CallTerm(terms ...*Term) *Term {
+	return NewTerm(Call(terms))
+}
+
+// Copy returns a deep copy of c.
+func (c Call) Copy() Call {
+	return termSliceCopy(c)
+}
+
+// Compare compares c to other, returning <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (c Call) Compare(other Value) int {
+	return Compare(c, other)
+}
+
+// Find returns the current value or a not found error.
+func (Call) Find(Ref) (Value, error) {
+	return nil, errFindNotFound
+}
+
+// Hash returns the hash code for the Value.
+func (c Call) Hash() int {
+	return termSliceHash(c)
+}
+
+// IsGround returns true if the Value is ground.
+func (c Call) IsGround() bool {
+	return termSliceIsGround(c)
+}
+
+// MakeExpr returns a new Expr from this call.
+func (c Call) MakeExpr(output *Term) *Expr {
+	terms := []*Term(c)
+	return NewExpr(append(terms, output))
+}
+
+func (c Call) String() string {
+	args := make([]string, len(c)-1)
+	for i := 1; i < len(c); i++ {
+		args[i-1] = c[i].String()
+	}
+	return fmt.Sprintf("%v(%v)", c[0], strings.Join(args, ", "))
+}
+
+func termSliceCopy(a []*Term) []*Term {
+	cpy := make([]*Term, len(a))
+	for i := range a {
+		cpy[i] = a[i].Copy()
+	}
+	return cpy
+}
+
+func termSliceEqual(a, b []*Term) bool {
+	if len(a) == len(b) {
+		for i := range a {
+			if !a[i].Equal(b[i]) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
+
+func termSliceHash(a []*Term) int {
+	var hash int
+	for _, v := range a {
+		hash += v.Value.Hash()
+	}
+	return hash
+}
+
+func termSliceIsGround(a []*Term) bool {
+	for _, v := range a {
+		if !v.IsGround() {
+			return false
+		}
+	}
+	return true
+}
+
+// NOTE(tsandall): The unmarshalling errors in these functions are not
+// helpful for callers because they do not identify the source of the
+// unmarshalling error. Because OPA doesn't accept JSON describing ASTs
+// from callers, this is acceptable (for now). If that changes in the future,
+// the error messages should be revisited. The current approach focuses
+// on the happy path and treats all errors the same.
If better error +// reporting is needed, the error paths will need to be fleshed out. + +func unmarshalBody(b []interface{}) (Body, error) { + buf := Body{} + for _, e := range b { + if m, ok := e.(map[string]interface{}); ok { + expr := &Expr{} + if err := unmarshalExpr(expr, m); err == nil { + buf = append(buf, expr) + continue + } + } + goto unmarshal_error + } + return buf, nil +unmarshal_error: + return nil, errors.New("ast: unable to unmarshal body") +} + +func unmarshalExpr(expr *Expr, v map[string]interface{}) error { + if x, ok := v["negated"]; ok { + if b, ok := x.(bool); ok { + expr.Negated = b + } else { + return fmt.Errorf("ast: unable to unmarshal negated field with type: %T (expected true or false)", v["negated"]) + } + } + if generatedRaw, ok := v["generated"]; ok { + if b, ok := generatedRaw.(bool); ok { + expr.Generated = b + } else { + return fmt.Errorf("ast: unable to unmarshal generated field with type: %T (expected true or false)", v["generated"]) + } + } + + if err := unmarshalExprIndex(expr, v); err != nil { + return err + } + switch ts := v["terms"].(type) { + case map[string]interface{}: + t, err := unmarshalTerm(ts) + if err != nil { + return err + } + expr.Terms = t + case []interface{}: + terms, err := unmarshalTermSlice(ts) + if err != nil { + return err + } + expr.Terms = terms + default: + return fmt.Errorf(`ast: unable to unmarshal terms field with type: %T (expected {"value": ..., "type": ...} or [{"value": ..., "type": ...}, ...])`, v["terms"]) + } + if x, ok := v["with"]; ok { + if sl, ok := x.([]interface{}); ok { + ws := make([]*With, len(sl)) + for i := range sl { + var err error + ws[i], err = unmarshalWith(sl[i]) + if err != nil { + return err + } + } + expr.With = ws + } + } + if loc, ok := v["location"].(map[string]interface{}); ok { + expr.Location = &Location{} + if err := unmarshalLocation(expr.Location, loc); err != nil { + return err + } + } + return nil +} + +func unmarshalLocation(loc *Location, v map[string]interface{}) error { + if x, ok := v["file"]; ok { + if s, ok := x.(string); ok { + loc.File = s + } else { + return fmt.Errorf("ast: unable to unmarshal file field with type: %T (expected string)", v["file"]) + } + } + if x, ok := v["row"]; ok { + if n, ok := x.(json.Number); ok { + i64, err := n.Int64() + if err != nil { + return err + } + loc.Row = int(i64) + } else { + return fmt.Errorf("ast: unable to unmarshal row field with type: %T (expected number)", v["row"]) + } + } + if x, ok := v["col"]; ok { + if n, ok := x.(json.Number); ok { + i64, err := n.Int64() + if err != nil { + return err + } + loc.Col = int(i64) + } else { + return fmt.Errorf("ast: unable to unmarshal col field with type: %T (expected number)", v["col"]) + } + } + + return nil +} + +func unmarshalExprIndex(expr *Expr, v map[string]interface{}) error { + if x, ok := v["index"]; ok { + if n, ok := x.(json.Number); ok { + i, err := n.Int64() + if err == nil { + expr.Index = int(i) + return nil + } + } + } + return fmt.Errorf("ast: unable to unmarshal index field with type: %T (expected integer)", v["index"]) +} + +func unmarshalTerm(m map[string]interface{}) (*Term, error) { + var term Term + + v, err := unmarshalValue(m) + if err != nil { + return nil, err + } + term.Value = v + + if loc, ok := m["location"].(map[string]interface{}); ok { + term.Location = &Location{} + if err := unmarshalLocation(term.Location, loc); err != nil { + return nil, err + } + } + + return &term, nil +} + +func unmarshalTermSlice(s []interface{}) ([]*Term, error) { + buf := []*Term{} + 
for _, x := range s { + if m, ok := x.(map[string]interface{}); ok { + t, err := unmarshalTerm(m) + if err == nil { + buf = append(buf, t) + continue + } + return nil, err + } + return nil, errors.New("ast: unable to unmarshal term") + } + return buf, nil +} + +func unmarshalTermSliceValue(d map[string]interface{}) ([]*Term, error) { + if s, ok := d["value"].([]interface{}); ok { + return unmarshalTermSlice(s) + } + return nil, errors.New(`ast: unable to unmarshal term (expected {"value": [...], "type": ...} where type is one of: ref, array, or set)`) +} + +func unmarshalWith(i interface{}) (*With, error) { + if m, ok := i.(map[string]interface{}); ok { + tgt, _ := m["target"].(map[string]interface{}) + target, err := unmarshalTerm(tgt) + if err == nil { + val, _ := m["value"].(map[string]interface{}) + value, err := unmarshalTerm(val) + if err == nil { + return &With{ + Target: target, + Value: value, + }, nil + } + return nil, err + } + return nil, err + } + return nil, errors.New(`ast: unable to unmarshal with modifier (expected {"target": {...}, "value": {...}})`) +} + +func unmarshalValue(d map[string]interface{}) (Value, error) { + v := d["value"] + switch d["type"] { + case "null": + return NullValue, nil + case "boolean": + if b, ok := v.(bool); ok { + return Boolean(b), nil + } + case "number": + if n, ok := v.(json.Number); ok { + return Number(n), nil + } + case "string": + if s, ok := v.(string); ok { + return String(s), nil + } + case "var": + if s, ok := v.(string); ok { + return Var(s), nil + } + case "ref": + if s, err := unmarshalTermSliceValue(d); err == nil { + return Ref(s), nil + } + case "array": + if s, err := unmarshalTermSliceValue(d); err == nil { + return NewArray(s...), nil + } + case "set": + if s, err := unmarshalTermSliceValue(d); err == nil { + return NewSet(s...), nil + } + case "object": + if s, ok := v.([]interface{}); ok { + buf := NewObject() + for _, x := range s { + if i, ok := x.([]interface{}); ok && len(i) == 2 { + p, err := unmarshalTermSlice(i) + if err == nil { + buf.Insert(p[0], p[1]) + continue + } + } + goto unmarshal_error + } + return buf, nil + } + case "arraycomprehension", "setcomprehension": + if m, ok := v.(map[string]interface{}); ok { + t, ok := m["term"].(map[string]interface{}) + if !ok { + goto unmarshal_error + } + + term, err := unmarshalTerm(t) + if err != nil { + goto unmarshal_error + } + + b, ok := m["body"].([]interface{}) + if !ok { + goto unmarshal_error + } + + body, err := unmarshalBody(b) + if err != nil { + goto unmarshal_error + } + + if d["type"] == "arraycomprehension" { + return &ArrayComprehension{Term: term, Body: body}, nil + } + return &SetComprehension{Term: term, Body: body}, nil + } + case "objectcomprehension": + if m, ok := v.(map[string]interface{}); ok { + k, ok := m["key"].(map[string]interface{}) + if !ok { + goto unmarshal_error + } + + key, err := unmarshalTerm(k) + if err != nil { + goto unmarshal_error + } + + v, ok := m["value"].(map[string]interface{}) + if !ok { + goto unmarshal_error + } + + value, err := unmarshalTerm(v) + if err != nil { + goto unmarshal_error + } + + b, ok := m["body"].([]interface{}) + if !ok { + goto unmarshal_error + } + + body, err := unmarshalBody(b) + if err != nil { + goto unmarshal_error + } + + return &ObjectComprehension{Key: key, Value: value, Body: body}, nil + } + case "call": + if s, err := unmarshalTermSliceValue(d); err == nil { + return Call(s), nil + } + } +unmarshal_error: + return nil, errors.New("ast: unable to unmarshal term") +} diff --git 
a/vendor/github.com/open-policy-agent/opa/v1/ast/transform.go b/vendor/github.com/open-policy-agent/opa/v1/ast/transform.go new file mode 100644 index 00000000000..e8c9ddcab1c --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/transform.go @@ -0,0 +1,431 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "fmt" +) + +// Transformer defines the interface for transforming AST elements. If the +// transformer returns nil and does not indicate an error, the AST element will +// be set to nil and no transformations will be applied to children of the +// element. +type Transformer interface { + Transform(interface{}) (interface{}, error) +} + +// Transform iterates the AST and calls the Transform function on the +// Transformer t for x before recursing. +func Transform(t Transformer, x interface{}) (interface{}, error) { + + if term, ok := x.(*Term); ok { + return Transform(t, term.Value) + } + + y, err := t.Transform(x) + if err != nil { + return x, err + } + + if y == nil { + return nil, nil + } + + var ok bool + switch y := y.(type) { + case *Module: + p, err := Transform(t, y.Package) + if err != nil { + return nil, err + } + if y.Package, ok = p.(*Package); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y.Package, p) + } + for i := range y.Imports { + imp, err := Transform(t, y.Imports[i]) + if err != nil { + return nil, err + } + if y.Imports[i], ok = imp.(*Import); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y.Imports[i], imp) + } + } + for i := range y.Rules { + rule, err := Transform(t, y.Rules[i]) + if err != nil { + return nil, err + } + if y.Rules[i], ok = rule.(*Rule); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y.Rules[i], rule) + } + } + for i := range y.Annotations { + a, err := Transform(t, y.Annotations[i]) + if err != nil { + return nil, err + } + if y.Annotations[i], ok = a.(*Annotations); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y.Annotations[i], a) + } + } + for i := range y.Comments { + comment, err := Transform(t, y.Comments[i]) + if err != nil { + return nil, err + } + if y.Comments[i], ok = comment.(*Comment); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y.Comments[i], comment) + } + } + return y, nil + case *Package: + ref, err := Transform(t, y.Path) + if err != nil { + return nil, err + } + if y.Path, ok = ref.(Ref); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y.Path, ref) + } + return y, nil + case *Import: + y.Path, err = transformTerm(t, y.Path) + if err != nil { + return nil, err + } + if y.Alias, err = transformVar(t, y.Alias); err != nil { + return nil, err + } + return y, nil + case *Rule: + if y.Head, err = transformHead(t, y.Head); err != nil { + return nil, err + } + if y.Body, err = transformBody(t, y.Body); err != nil { + return nil, err + } + if y.Else != nil { + rule, err := Transform(t, y.Else) + if err != nil { + return nil, err + } + if y.Else, ok = rule.(*Rule); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y.Else, rule) + } + } + return y, nil + case *Head: + if y.Reference, err = transformRef(t, y.Reference); err != nil { + return nil, err + } + if y.Name, err = transformVar(t, y.Name); err != nil { + return nil, err + } + if y.Args, err = transformArgs(t, y.Args); err != nil { + return nil, err + } + if y.Key != nil { + if y.Key, err = transformTerm(t, y.Key); err != nil 
{ + return nil, err + } + } + if y.Value != nil { + if y.Value, err = transformTerm(t, y.Value); err != nil { + return nil, err + } + } + return y, nil + case Args: + for i := range y { + if y[i], err = transformTerm(t, y[i]); err != nil { + return nil, err + } + } + return y, nil + case Body: + for i, e := range y { + e, err := Transform(t, e) + if err != nil { + return nil, err + } + if y[i], ok = e.(*Expr); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y[i], e) + } + } + return y, nil + case *Expr: + switch ts := y.Terms.(type) { + case *SomeDecl: + decl, err := Transform(t, ts) + if err != nil { + return nil, err + } + if y.Terms, ok = decl.(*SomeDecl); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y, decl) + } + return y, nil + case []*Term: + for i := range ts { + if ts[i], err = transformTerm(t, ts[i]); err != nil { + return nil, err + } + } + case *Term: + if y.Terms, err = transformTerm(t, ts); err != nil { + return nil, err + } + case *Every: + if ts.Key != nil { + ts.Key, err = transformTerm(t, ts.Key) + if err != nil { + return nil, err + } + } + ts.Value, err = transformTerm(t, ts.Value) + if err != nil { + return nil, err + } + ts.Domain, err = transformTerm(t, ts.Domain) + if err != nil { + return nil, err + } + ts.Body, err = transformBody(t, ts.Body) + if err != nil { + return nil, err + } + y.Terms = ts + } + for i, w := range y.With { + w, err := Transform(t, w) + if err != nil { + return nil, err + } + if y.With[i], ok = w.(*With); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y.With[i], w) + } + } + return y, nil + case *With: + if y.Target, err = transformTerm(t, y.Target); err != nil { + return nil, err + } + if y.Value, err = transformTerm(t, y.Value); err != nil { + return nil, err + } + return y, nil + case Ref: + for i, term := range y { + if y[i], err = transformTerm(t, term); err != nil { + return nil, err + } + } + return y, nil + case *object: + return y.Map(func(k, v *Term) (*Term, *Term, error) { + k, err := transformTerm(t, k) + if err != nil { + return nil, nil, err + } + v, err = transformTerm(t, v) + if err != nil { + return nil, nil, err + } + return k, v, nil + }) + case *Array: + for i := range y.Len() { + v, err := transformTerm(t, y.Elem(i)) + if err != nil { + return nil, err + } + y.set(i, v) + } + return y, nil + case Set: + y, err = y.Map(func(term *Term) (*Term, error) { + return transformTerm(t, term) + }) + if err != nil { + return nil, err + } + return y, nil + case *ArrayComprehension: + if y.Term, err = transformTerm(t, y.Term); err != nil { + return nil, err + } + if y.Body, err = transformBody(t, y.Body); err != nil { + return nil, err + } + return y, nil + case *ObjectComprehension: + if y.Key, err = transformTerm(t, y.Key); err != nil { + return nil, err + } + if y.Value, err = transformTerm(t, y.Value); err != nil { + return nil, err + } + if y.Body, err = transformBody(t, y.Body); err != nil { + return nil, err + } + return y, nil + case *SetComprehension: + if y.Term, err = transformTerm(t, y.Term); err != nil { + return nil, err + } + if y.Body, err = transformBody(t, y.Body); err != nil { + return nil, err + } + return y, nil + case Call: + for i := range y { + if y[i], err = transformTerm(t, y[i]); err != nil { + return nil, err + } + } + return y, nil + default: + return y, nil + } +} + +// TransformRefs calls the function f on all references under x. 
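As a sketch of how that helper might be applied, under the assumption of the vendored v1 `ast` import path (the `data.foo`/`data.bar` paths are made up for illustration):

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	module := ast.MustParseModule(`package example

allow if data.foo.admins[_] == input.user
`)
	// Re-root every data.foo.* reference under data.bar, leaving others alone.
	y, err := ast.TransformRefs(module, func(r ast.Ref) (ast.Value, error) {
		if r.HasPrefix(ast.MustParseRef("data.foo")) {
			return append(ast.MustParseRef("data.bar"), r[2:]...), nil
		}
		return r, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(y)
}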
+func TransformRefs(x interface{}, f func(Ref) (Value, error)) (interface{}, error) {
+	t := &GenericTransformer{func(x interface{}) (interface{}, error) {
+		if r, ok := x.(Ref); ok {
+			return f(r)
+		}
+		return x, nil
+	}}
+	return Transform(t, x)
+}
+
+// TransformVars calls the function f on all vars under x.
+func TransformVars(x interface{}, f func(Var) (Value, error)) (interface{}, error) {
+	t := &GenericTransformer{func(x interface{}) (interface{}, error) {
+		if v, ok := x.(Var); ok {
+			return f(v)
+		}
+		return x, nil
+	}}
+	return Transform(t, x)
+}
+
+// TransformComprehensions calls the function f on all comprehensions under x.
+func TransformComprehensions(x interface{}, f func(interface{}) (Value, error)) (interface{}, error) {
+	t := &GenericTransformer{func(x interface{}) (interface{}, error) {
+		switch x := x.(type) {
+		case *ArrayComprehension:
+			return f(x)
+		case *SetComprehension:
+			return f(x)
+		case *ObjectComprehension:
+			return f(x)
+		}
+		return x, nil
+	}}
+	return Transform(t, x)
+}
+
+// GenericTransformer implements the Transformer interface to provide a utility
+// to transform AST nodes using a closure.
+type GenericTransformer struct {
+	f func(interface{}) (interface{}, error)
+}
+
+// NewGenericTransformer returns a new GenericTransformer that will transform
+// AST nodes using the function f.
+func NewGenericTransformer(f func(x interface{}) (interface{}, error)) *GenericTransformer {
+	return &GenericTransformer{
+		f: f,
+	}
+}
+
+// Transform calls the function f on the GenericTransformer.
+func (t *GenericTransformer) Transform(x interface{}) (interface{}, error) {
+	return t.f(x)
+}
+
+func transformHead(t Transformer, head *Head) (*Head, error) {
+	y, err := Transform(t, head)
+	if err != nil {
+		return nil, err
+	}
+	h, ok := y.(*Head)
+	if !ok {
+		return nil, fmt.Errorf("illegal transform: %T != %T", head, y)
+	}
+	return h, nil
+}
+
+func transformArgs(t Transformer, args Args) (Args, error) {
+	y, err := Transform(t, args)
+	if err != nil {
+		return nil, err
+	}
+	a, ok := y.(Args)
+	if !ok {
+		return nil, fmt.Errorf("illegal transform: %T != %T", args, y)
+	}
+	return a, nil
+}
+
+func transformBody(t Transformer, body Body) (Body, error) {
+	y, err := Transform(t, body)
+	if err != nil {
+		return nil, err
+	}
+	r, ok := y.(Body)
+	if !ok {
+		return nil, fmt.Errorf("illegal transform: %T != %T", body, y)
+	}
+	return r, nil
+}
+
+func transformTerm(t Transformer, term *Term) (*Term, error) {
+	v, err := transformValue(t, term.Value)
+	if err != nil {
+		return nil, err
+	}
+	r := &Term{
+		Value:    v,
+		Location: term.Location,
+	}
+	return r, nil
+}
+
+func transformValue(t Transformer, v Value) (Value, error) {
+	v1, err := Transform(t, v)
+	if err != nil {
+		return nil, err
+	}
+	r, ok := v1.(Value)
+	if !ok {
+		return nil, fmt.Errorf("illegal transform: %T != %T", v, v1)
+	}
+	return r, nil
+}
+
+func transformVar(t Transformer, v Var) (Var, error) {
+	v1, err := Transform(t, v)
+	if err != nil {
+		return "", err
+	}
+	r, ok := v1.(Var)
+	if !ok {
+		return "", fmt.Errorf("illegal transform: %T != %T", v, v1)
+	}
+	return r, nil
+}
+
+func transformRef(t Transformer, r Ref) (Ref, error) {
+	r1, err := Transform(t, r)
+	if err != nil {
+		return nil, err
+	}
+	r2, ok := r1.(Ref)
+	if !ok {
+		return nil, fmt.Errorf("illegal transform: %T != %T", r, r1)
+	}
+	return r2, nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/unify.go b/vendor/github.com/open-policy-agent/opa/v1/ast/unify.go
new file mode 100644
index 00000000000..3af52815f79
--- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/unify.go @@ -0,0 +1,235 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +func isRefSafe(ref Ref, safe VarSet) bool { + switch head := ref[0].Value.(type) { + case Var: + return safe.Contains(head) + case Call: + return isCallSafe(head, safe) + default: + for v := range ref[0].Vars() { + if !safe.Contains(v) { + return false + } + } + return true + } +} + +func isCallSafe(call Call, safe VarSet) bool { + vis := NewVarVisitor().WithParams(SafetyCheckVisitorParams) + vis.Walk(call) + unsafe := vis.Vars().Diff(safe) + return len(unsafe) == 0 +} + +// Unify returns a set of variables that will be unified when the equality expression defined by +// terms a and b is evaluated. The unifier assumes that variables in the VarSet safe are already +// unified. +func Unify(safe VarSet, a *Term, b *Term) VarSet { + u := &unifier{ + safe: safe, + unified: VarSet{}, + unknown: map[Var]VarSet{}, + } + u.unify(a, b) + return u.unified +} + +type unifier struct { + safe VarSet + unified VarSet + unknown map[Var]VarSet +} + +func (u *unifier) isSafe(x Var) bool { + return u.safe.Contains(x) || u.unified.Contains(x) +} + +func (u *unifier) unify(a *Term, b *Term) { + + switch a := a.Value.(type) { + + case Var: + switch b := b.Value.(type) { + case Var: + if u.isSafe(b) { + u.markSafe(a) + } else if u.isSafe(a) { + u.markSafe(b) + } else { + u.markUnknown(a, b) + u.markUnknown(b, a) + } + case *Array, Object: + u.unifyAll(a, b) + case Ref: + if isRefSafe(b, u.safe) { + u.markSafe(a) + } + case Call: + if isCallSafe(b, u.safe) { + u.markSafe(a) + } + default: + u.markSafe(a) + } + + case Ref: + if isRefSafe(a, u.safe) { + switch b := b.Value.(type) { + case Var: + u.markSafe(b) + case *Array, Object: + u.markAllSafe(b) + } + } + + case Call: + if isCallSafe(a, u.safe) { + switch b := b.Value.(type) { + case Var: + u.markSafe(b) + case *Array, Object: + u.markAllSafe(b) + } + } + + case *ArrayComprehension: + switch b := b.Value.(type) { + case Var: + u.markSafe(b) + case *Array: + u.markAllSafe(b) + } + case *ObjectComprehension: + switch b := b.Value.(type) { + case Var: + u.markSafe(b) + case *object: + u.markAllSafe(b) + } + case *SetComprehension: + switch b := b.Value.(type) { + case Var: + u.markSafe(b) + } + + case *Array: + switch b := b.Value.(type) { + case Var: + u.unifyAll(b, a) + case *ArrayComprehension, *ObjectComprehension, *SetComprehension: + u.markAllSafe(a) + case Ref: + if isRefSafe(b, u.safe) { + u.markAllSafe(a) + } + case Call: + if isCallSafe(b, u.safe) { + u.markAllSafe(a) + } + case *Array: + if a.Len() == b.Len() { + for i := range a.Len() { + u.unify(a.Elem(i), b.Elem(i)) + } + } + } + + case *object: + switch b := b.Value.(type) { + case Var: + u.unifyAll(b, a) + case Ref: + if isRefSafe(b, u.safe) { + u.markAllSafe(a) + } + case Call: + if isCallSafe(b, u.safe) { + u.markAllSafe(a) + } + case *object: + if a.Len() == b.Len() { + _ = a.Iter(func(k, v *Term) error { + if v2 := b.Get(k); v2 != nil { + u.unify(v, v2) + } + return nil + }) // impossible to return error + } + } + + default: + switch b := b.Value.(type) { + case Var: + u.markSafe(b) + } + } +} + +func (u *unifier) markAllSafe(x Value) { + vis := u.varVisitor() + vis.Walk(x) + for v := range vis.Vars() { + u.markSafe(v) + } +} + +func (u *unifier) markSafe(x Var) { + u.unified.Add(x) + + // Add dependencies of 'x' to safe set 
+	vs := u.unknown[x]
+	delete(u.unknown, x)
+	for v := range vs {
+		u.markSafe(v)
+	}
+
+	// Add dependants of 'x' to safe set if they have no more
+	// dependencies.
+	for v, deps := range u.unknown {
+		if deps.Contains(x) {
+			delete(deps, x)
+			if len(deps) == 0 {
+				u.markSafe(v)
+			}
+		}
+	}
+}
+
+func (u *unifier) markUnknown(a, b Var) {
+	if _, ok := u.unknown[a]; !ok {
+		u.unknown[a] = NewVarSet()
+	}
+	u.unknown[a].Add(b)
+}
+
+func (u *unifier) unifyAll(a Var, b Value) {
+	if u.isSafe(a) {
+		u.markAllSafe(b)
+	} else {
+		vis := u.varVisitor()
+		vis.Walk(b)
+		unsafe := vis.Vars().Diff(u.safe).Diff(u.unified)
+		if len(unsafe) == 0 {
+			u.markSafe(a)
+		} else {
+			for v := range unsafe {
+				u.markUnknown(a, v)
+			}
+		}
+	}
+}
+
+func (*unifier) varVisitor() *VarVisitor {
+	return NewVarVisitor().WithParams(VarVisitorParams{
+		SkipRefHead:    true,
+		SkipObjectKeys: true,
+		SkipClosures:   true,
+	})
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/varset.go b/vendor/github.com/open-policy-agent/opa/v1/ast/varset.go
new file mode 100644
index 00000000000..bccb035e30f
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/varset.go
@@ -0,0 +1,117 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+	"fmt"
+	"slices"
+
+	"github.com/open-policy-agent/opa/v1/util"
+)
+
+// VarSet represents a set of variables.
+type VarSet map[Var]struct{}
+
+// NewVarSet returns a new VarSet containing the specified variables.
+func NewVarSet(vs ...Var) VarSet {
+	s := make(VarSet, len(vs))
+	for _, v := range vs {
+		s.Add(v)
+	}
+	return s
+}
+
+// NewVarSetOfSize returns a new, empty VarSet with capacity preallocated
+// for size variables.
+func NewVarSetOfSize(size int) VarSet {
+	return make(VarSet, size)
+}
+
+// Add updates the set to include the variable "v".
+func (s VarSet) Add(v Var) {
+	s[v] = struct{}{}
+}
+
+// Contains returns true if the set contains the variable "v".
+func (s VarSet) Contains(v Var) bool {
+	_, ok := s[v]
+	return ok
+}
+
+// Copy returns a shallow copy of the VarSet.
+func (s VarSet) Copy() VarSet {
+	cpy := NewVarSetOfSize(len(s))
+	for v := range s {
+		cpy.Add(v)
+	}
+	return cpy
+}
+
+// Diff returns a VarSet containing variables in s that are not in vs.
+func (s VarSet) Diff(vs VarSet) VarSet {
+	i := 0
+	for v := range s {
+		if !vs.Contains(v) {
+			i++
+		}
+	}
+	r := NewVarSetOfSize(i)
+	for v := range s {
+		if !vs.Contains(v) {
+			r.Add(v)
+		}
+	}
+	return r
+}
+
+// Equal returns true if s contains exactly the same elements as vs.
+func (s VarSet) Equal(vs VarSet) bool {
+	if len(s) != len(vs) {
+		return false
+	}
+	for v := range s {
+		if !vs.Contains(v) {
+			return false
+		}
+	}
+	return true
+}
+
+// Intersect returns a VarSet containing variables in s that are in vs.
+func (s VarSet) Intersect(vs VarSet) VarSet {
+	i := 0
+	for v := range s {
+		if vs.Contains(v) {
+			i++
+		}
+	}
+	r := NewVarSetOfSize(i)
+	for v := range s {
+		if vs.Contains(v) {
+			r.Add(v)
+		}
+	}
+	return r
+}
+
+// Sorted returns a new sorted slice of vars from s.
+func (s VarSet) Sorted() []Var {
+	sorted := make([]Var, 0, len(s))
+	for v := range s {
+		sorted = append(sorted, v)
+	}
+	slices.SortFunc(sorted, VarCompare)
+	return sorted
+}
+
+// Update merges the other VarSet into this VarSet.
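A rough sketch of these VarSet operations working together, assuming the vendored v1 `ast` package:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	a := ast.NewVarSet(ast.Var("x"), ast.Var("y"))
	b := ast.NewVarSet(ast.Var("y"), ast.Var("z"))
	fmt.Println(a.Diff(b).Sorted())      // [x]
	fmt.Println(a.Intersect(b).Sorted()) // [y]
	a.Update(b)                          // a now holds x, y, z
	fmt.Println(a.Equal(ast.NewVarSet(ast.Var("x"), ast.Var("y"), ast.Var("z")))) // true
}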
+func (s VarSet) Update(vs VarSet) { + for v := range vs { + s.Add(v) + } +} + +func (s VarSet) String() string { + return fmt.Sprintf("%v", util.KeysSorted(s)) +} diff --git a/vendor/github.com/open-policy-agent/opa/ast/version_index.json b/vendor/github.com/open-policy-agent/opa/v1/ast/version_index.json similarity index 99% rename from vendor/github.com/open-policy-agent/opa/ast/version_index.json rename to vendor/github.com/open-policy-agent/opa/v1/ast/version_index.json index 718df220f94..eecb68c772b 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/version_index.json +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/version_index.json @@ -497,6 +497,13 @@ "PreRelease": "", "Metadata": "" }, + "internal.test_case": { + "Major": 1, + "Minor": 2, + "Patch": 0, + "PreRelease": "", + "Metadata": "" + }, "intersection": { "Major": 0, "Minor": 17, @@ -1395,6 +1402,13 @@ } }, "features": { + "rego_v1": { + "Major": 1, + "Minor": 0, + "Patch": 0, + "PreRelease": "", + "Metadata": "" + }, "rego_v1_import": { "Major": 0, "Minor": 59, diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/visit.go b/vendor/github.com/open-policy-agent/opa/v1/ast/visit.go new file mode 100644 index 00000000000..0115c4f4552 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/visit.go @@ -0,0 +1,783 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +// Visitor defines the interface for iterating AST elements. The Visit function +// can return a Visitor w which will be used to visit the children of the AST +// element v. If the Visit function returns nil, the children will not be +// visited. +// Deprecated: use GenericVisitor or another visitor implementation +type Visitor interface { + Visit(v interface{}) (w Visitor) +} + +// BeforeAndAfterVisitor wraps Visitor to provide hooks for being called before +// and after the AST has been visited. +// Deprecated: use GenericVisitor or another visitor implementation +type BeforeAndAfterVisitor interface { + Visitor + Before(x interface{}) + After(x interface{}) +} + +// Walk iterates the AST by calling the Visit function on the Visitor +// v for x before recursing. +// Deprecated: use GenericVisitor.Walk +func Walk(v Visitor, x interface{}) { + if bav, ok := v.(BeforeAndAfterVisitor); !ok { + walk(v, x) + } else { + bav.Before(x) + defer bav.After(x) + walk(bav, x) + } +} + +// WalkBeforeAndAfter iterates the AST by calling the Visit function on the +// Visitor v for x before recursing. 
+// Deprecated: use GenericVisitor.Walk +func WalkBeforeAndAfter(v BeforeAndAfterVisitor, x interface{}) { + Walk(v, x) +} + +func walk(v Visitor, x interface{}) { + w := v.Visit(x) + if w == nil { + return + } + switch x := x.(type) { + case *Module: + Walk(w, x.Package) + for i := range x.Imports { + Walk(w, x.Imports[i]) + } + for i := range x.Rules { + Walk(w, x.Rules[i]) + } + for i := range x.Annotations { + Walk(w, x.Annotations[i]) + } + for i := range x.Comments { + Walk(w, x.Comments[i]) + } + case *Package: + Walk(w, x.Path) + case *Import: + Walk(w, x.Path) + Walk(w, x.Alias) + case *Rule: + Walk(w, x.Head) + Walk(w, x.Body) + if x.Else != nil { + Walk(w, x.Else) + } + case *Head: + Walk(w, x.Name) + Walk(w, x.Args) + if x.Key != nil { + Walk(w, x.Key) + } + if x.Value != nil { + Walk(w, x.Value) + } + case Body: + for i := range x { + Walk(w, x[i]) + } + case Args: + for i := range x { + Walk(w, x[i]) + } + case *Expr: + switch ts := x.Terms.(type) { + case *Term, *SomeDecl, *Every: + Walk(w, ts) + case []*Term: + for i := range ts { + Walk(w, ts[i]) + } + } + for i := range x.With { + Walk(w, x.With[i]) + } + case *With: + Walk(w, x.Target) + Walk(w, x.Value) + case *Term: + Walk(w, x.Value) + case Ref: + for i := range x { + Walk(w, x[i]) + } + case *object: + x.Foreach(func(k, vv *Term) { + Walk(w, k) + Walk(w, vv) + }) + case *Array: + x.Foreach(func(t *Term) { + Walk(w, t) + }) + case Set: + x.Foreach(func(t *Term) { + Walk(w, t) + }) + case *ArrayComprehension: + Walk(w, x.Term) + Walk(w, x.Body) + case *ObjectComprehension: + Walk(w, x.Key) + Walk(w, x.Value) + Walk(w, x.Body) + case *SetComprehension: + Walk(w, x.Term) + Walk(w, x.Body) + case Call: + for i := range x { + Walk(w, x[i]) + } + case *Every: + if x.Key != nil { + Walk(w, x.Key) + } + Walk(w, x.Value) + Walk(w, x.Domain) + Walk(w, x.Body) + case *SomeDecl: + for i := range x.Symbols { + Walk(w, x.Symbols[i]) + } + } +} + +// WalkVars calls the function f on all vars under x. If the function f +// returns true, AST nodes under the last node will not be visited. +func WalkVars(x interface{}, f func(Var) bool) { + vis := &GenericVisitor{func(x interface{}) bool { + if v, ok := x.(Var); ok { + return f(v) + } + return false + }} + vis.Walk(x) +} + +// WalkClosures calls the function f on all closures under x. If the function f +// returns true, AST nodes under the last node will not be visited. +func WalkClosures(x interface{}, f func(interface{}) bool) { + vis := &GenericVisitor{func(x interface{}) bool { + switch x := x.(type) { + case *ArrayComprehension, *ObjectComprehension, *SetComprehension, *Every: + return f(x) + } + return false + }} + vis.Walk(x) +} + +// WalkRefs calls the function f on all references under x. If the function f +// returns true, AST nodes under the last node will not be visited. +func WalkRefs(x interface{}, f func(Ref) bool) { + vis := &GenericVisitor{func(x interface{}) bool { + if r, ok := x.(Ref); ok { + return f(r) + } + return false + }} + vis.Walk(x) +} + +// WalkTerms calls the function f on all terms under x. If the function f +// returns true, AST nodes under the last node will not be visited. +func WalkTerms(x interface{}, f func(*Term) bool) { + vis := &GenericVisitor{func(x interface{}) bool { + if term, ok := x.(*Term); ok { + return f(term) + } + return false + }} + vis.Walk(x) +} + +// WalkWiths calls the function f on all with modifiers under x. If the function f +// returns true, AST nodes under the last node will not be visited. 
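A sketch of this family of Walk* helpers in use (vendored v1 `ast` package assumed); the callback's boolean controls pruning:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	body := ast.MustParseBody(`x := input.a; y := x + 1`)
	ast.WalkVars(body, func(v ast.Var) bool {
		// Visits every var, including ref heads like input and
		// builtin operator names such as assign and plus.
		fmt.Println("var:", v)
		return false // false: keep descending
	})
}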
+func WalkWiths(x interface{}, f func(*With) bool) { + vis := &GenericVisitor{func(x interface{}) bool { + if w, ok := x.(*With); ok { + return f(w) + } + return false + }} + vis.Walk(x) +} + +// WalkExprs calls the function f on all expressions under x. If the function f +// returns true, AST nodes under the last node will not be visited. +func WalkExprs(x interface{}, f func(*Expr) bool) { + vis := &GenericVisitor{func(x interface{}) bool { + if r, ok := x.(*Expr); ok { + return f(r) + } + return false + }} + vis.Walk(x) +} + +// WalkBodies calls the function f on all bodies under x. If the function f +// returns true, AST nodes under the last node will not be visited. +func WalkBodies(x interface{}, f func(Body) bool) { + vis := &GenericVisitor{func(x interface{}) bool { + if b, ok := x.(Body); ok { + return f(b) + } + return false + }} + vis.Walk(x) +} + +// WalkRules calls the function f on all rules under x. If the function f +// returns true, AST nodes under the last node will not be visited. +func WalkRules(x interface{}, f func(*Rule) bool) { + vis := &GenericVisitor{func(x interface{}) bool { + if r, ok := x.(*Rule); ok { + stop := f(r) + // NOTE(tsandall): since rules cannot be embedded inside of queries + // we can stop early if there is no else block. + if stop || r.Else == nil { + return true + } + } + return false + }} + vis.Walk(x) +} + +// WalkNodes calls the function f on all nodes under x. If the function f +// returns true, AST nodes under the last node will not be visited. +func WalkNodes(x interface{}, f func(Node) bool) { + vis := &GenericVisitor{func(x interface{}) bool { + if n, ok := x.(Node); ok { + return f(n) + } + return false + }} + vis.Walk(x) +} + +// GenericVisitor provides a utility to walk over AST nodes using a +// closure. If the closure returns true, the visitor will not walk +// over AST nodes under x. +type GenericVisitor struct { + f func(x interface{}) bool +} + +// NewGenericVisitor returns a new GenericVisitor that will invoke the function +// f on AST nodes. +func NewGenericVisitor(f func(x interface{}) bool) *GenericVisitor { + return &GenericVisitor{f} +} + +// Walk iterates the AST by calling the function f on the +// GenericVisitor before recursing. Contrary to the generic Walk, this +// does not require allocating the visitor from heap. 
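For instance, a sketch that collects every string literal in a query, assuming the vendored v1 `ast` package:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	var found []ast.String
	vis := ast.NewGenericVisitor(func(x interface{}) bool {
		if s, ok := x.(ast.String); ok {
			found = append(found, s)
		}
		return false // never prune; visit the whole AST
	})
	vis.Walk(ast.MustParseBody(`startswith(x, "/api")`))
	fmt.Println(found) // ["/api"]
}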
+func (vis *GenericVisitor) Walk(x interface{}) {
+	if vis.f(x) {
+		return
+	}
+
+	switch x := x.(type) {
+	case *Module:
+		vis.Walk(x.Package)
+		for i := range x.Imports {
+			vis.Walk(x.Imports[i])
+		}
+		for i := range x.Rules {
+			vis.Walk(x.Rules[i])
+		}
+		for i := range x.Annotations {
+			vis.Walk(x.Annotations[i])
+		}
+		for i := range x.Comments {
+			vis.Walk(x.Comments[i])
+		}
+	case *Package:
+		vis.Walk(x.Path)
+	case *Import:
+		vis.Walk(x.Path)
+		vis.Walk(x.Alias)
+	case *Rule:
+		vis.Walk(x.Head)
+		vis.Walk(x.Body)
+		if x.Else != nil {
+			vis.Walk(x.Else)
+		}
+	case *Head:
+		vis.Walk(x.Name)
+		vis.Walk(x.Args)
+		if x.Key != nil {
+			vis.Walk(x.Key)
+		}
+		if x.Value != nil {
+			vis.Walk(x.Value)
+		}
+	case Body:
+		for i := range x {
+			vis.Walk(x[i])
+		}
+	case Args:
+		for i := range x {
+			vis.Walk(x[i])
+		}
+	case *Expr:
+		switch ts := x.Terms.(type) {
+		case *Term, *SomeDecl, *Every:
+			vis.Walk(ts)
+		case []*Term:
+			for i := range ts {
+				vis.Walk(ts[i])
+			}
+		}
+		for i := range x.With {
+			vis.Walk(x.With[i])
+		}
+	case *With:
+		vis.Walk(x.Target)
+		vis.Walk(x.Value)
+	case *Term:
+		vis.Walk(x.Value)
+	case Ref:
+		for i := range x {
+			vis.Walk(x[i])
+		}
+	case *object:
+		x.Foreach(func(k, _ *Term) {
+			vis.Walk(k)
+			vis.Walk(x.Get(k))
+		})
+	case Object:
+		for _, k := range x.Keys() {
+			vis.Walk(k)
+			vis.Walk(x.Get(k))
+		}
+	case *Array:
+		for i := range x.Len() {
+			vis.Walk(x.Elem(i))
+		}
+	case Set:
+		xSlice := x.Slice()
+		for i := range xSlice {
+			vis.Walk(xSlice[i])
+		}
+	case *ArrayComprehension:
+		vis.Walk(x.Term)
+		vis.Walk(x.Body)
+	case *ObjectComprehension:
+		vis.Walk(x.Key)
+		vis.Walk(x.Value)
+		vis.Walk(x.Body)
+	case *SetComprehension:
+		vis.Walk(x.Term)
+		vis.Walk(x.Body)
+	case Call:
+		for i := range x {
+			vis.Walk(x[i])
+		}
+	case *Every:
+		if x.Key != nil {
+			vis.Walk(x.Key)
+		}
+		vis.Walk(x.Value)
+		vis.Walk(x.Domain)
+		vis.Walk(x.Body)
+	case *SomeDecl:
+		for i := range x.Symbols {
+			vis.Walk(x.Symbols[i])
+		}
+	}
+}
+
+// BeforeAfterVisitor provides a utility to walk over AST nodes using
+// closures. If the before closure returns true, the visitor will not
+// walk over AST nodes under x. The after closure is invoked always
+// after visiting a node.
+type BeforeAfterVisitor struct {
+	before func(x interface{}) bool
+	after  func(x interface{})
+}
+
+// NewBeforeAfterVisitor returns a new BeforeAfterVisitor that
+// will invoke the functions before and after AST nodes.
+func NewBeforeAfterVisitor(before func(x interface{}) bool, after func(x interface{})) *BeforeAfterVisitor {
+	return &BeforeAfterVisitor{before, after}
+}
+
+// Walk iterates the AST by calling the functions on the
+// BeforeAfterVisitor before and after recursing. Contrary to the
+// generic Walk, this does not require allocating the visitor from
+// heap.
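A sketch of the before/after hooks tracking nesting depth (vendored v1 `ast` package assumed); note the after hook runs even for pruned nodes, since it is deferred:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	depth := 0
	vis := ast.NewBeforeAfterVisitor(
		func(x interface{}) bool {
			depth++
			fmt.Printf("%*senter %T\n", depth*2, "", x)
			return false // don't prune
		},
		func(x interface{}) { depth-- },
	)
	vis.Walk(ast.MustParseBody(`count(input.items) > 0`))
}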
+func (vis *BeforeAfterVisitor) Walk(x interface{}) { + defer vis.after(x) + if vis.before(x) { + return + } + + switch x := x.(type) { + case *Module: + vis.Walk(x.Package) + for i := range x.Imports { + vis.Walk(x.Imports[i]) + } + for i := range x.Rules { + vis.Walk(x.Rules[i]) + } + for i := range x.Annotations { + vis.Walk(x.Annotations[i]) + } + for i := range x.Comments { + vis.Walk(x.Comments[i]) + } + case *Package: + vis.Walk(x.Path) + case *Import: + vis.Walk(x.Path) + vis.Walk(x.Alias) + case *Rule: + vis.Walk(x.Head) + vis.Walk(x.Body) + if x.Else != nil { + vis.Walk(x.Else) + } + case *Head: + if len(x.Reference) > 0 { + vis.Walk(x.Reference) + } else { + vis.Walk(x.Name) + if x.Key != nil { + vis.Walk(x.Key) + } + } + vis.Walk(x.Args) + if x.Value != nil { + vis.Walk(x.Value) + } + case Body: + for i := range x { + vis.Walk(x[i]) + } + case Args: + for i := range x { + vis.Walk(x[i]) + } + case *Expr: + switch ts := x.Terms.(type) { + case *Term, *SomeDecl, *Every: + vis.Walk(ts) + case []*Term: + for i := range ts { + vis.Walk(ts[i]) + } + } + for i := range x.With { + vis.Walk(x.With[i]) + } + case *With: + vis.Walk(x.Target) + vis.Walk(x.Value) + case *Term: + vis.Walk(x.Value) + case Ref: + for i := range x { + vis.Walk(x[i]) + } + case *object: + x.Foreach(func(k, _ *Term) { + vis.Walk(k) + vis.Walk(x.Get(k)) + }) + case Object: + x.Foreach(func(k, _ *Term) { + vis.Walk(k) + vis.Walk(x.Get(k)) + }) + case *Array: + x.Foreach(func(t *Term) { + vis.Walk(t) + }) + case Set: + xSlice := x.Slice() + for i := range xSlice { + vis.Walk(xSlice[i]) + } + case *ArrayComprehension: + vis.Walk(x.Term) + vis.Walk(x.Body) + case *ObjectComprehension: + vis.Walk(x.Key) + vis.Walk(x.Value) + vis.Walk(x.Body) + case *SetComprehension: + vis.Walk(x.Term) + vis.Walk(x.Body) + case Call: + for i := range x { + vis.Walk(x[i]) + } + case *Every: + if x.Key != nil { + vis.Walk(x.Key) + } + vis.Walk(x.Value) + vis.Walk(x.Domain) + vis.Walk(x.Body) + case *SomeDecl: + for i := range x.Symbols { + vis.Walk(x.Symbols[i]) + } + } +} + +// VarVisitor walks AST nodes under a given node and collects all encountered +// variables. The collected variables can be controlled by specifying +// VarVisitorParams when creating the visitor. +type VarVisitor struct { + params VarVisitorParams + vars VarSet +} + +// VarVisitorParams contains settings for a VarVisitor. +type VarVisitorParams struct { + SkipRefHead bool + SkipRefCallHead bool + SkipObjectKeys bool + SkipClosures bool + SkipWithTarget bool + SkipSets bool +} + +// NewVarVisitor returns a new VarVisitor object. +func NewVarVisitor() *VarVisitor { + return &VarVisitor{ + vars: NewVarSet(), + } +} + +// WithParams sets the parameters in params on vis. +func (vis *VarVisitor) WithParams(params VarVisitorParams) *VarVisitor { + vis.params = params + return vis +} + +// Vars returns a VarSet that contains collected vars. 
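Putting the visitor and its params together, a sketch assuming the vendored v1 `ast` package:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	vis := ast.NewVarVisitor().WithParams(ast.VarVisitorParams{
		SkipRefHead: true, // don't collect `p` from refs like p[x]
	})
	vis.Walk(ast.MustParseBody(`p[x] == y`))
	fmt.Println(vis.Vars().Sorted()) // [x y]
}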
+func (vis *VarVisitor) Vars() VarSet {
+	return vis.vars
+}
+
+// visit determines if the VarVisitor will recurse into x: if it returns `true`,
+// the visitor will _skip_ that branch of the AST.
+func (vis *VarVisitor) visit(v interface{}) bool {
+	if vis.params.SkipObjectKeys {
+		if o, ok := v.(Object); ok {
+			o.Foreach(func(_, v *Term) {
+				vis.Walk(v)
+			})
+			return true
+		}
+	}
+	if vis.params.SkipRefHead {
+		if r, ok := v.(Ref); ok {
+			rSlice := r[1:]
+			for i := range rSlice {
+				vis.Walk(rSlice[i])
+			}
+			return true
+		}
+	}
+	if vis.params.SkipClosures {
+		switch v := v.(type) {
+		case *ArrayComprehension, *ObjectComprehension, *SetComprehension:
+			return true
+		case *Expr:
+			if ev, ok := v.Terms.(*Every); ok {
+				vis.Walk(ev.Domain)
+				// We're _not_ walking ev.Body -- that's the closure here
+				return true
+			}
+		}
+	}
+	if vis.params.SkipWithTarget {
+		if v, ok := v.(*With); ok {
+			vis.Walk(v.Value)
+			return true
+		}
+	}
+	if vis.params.SkipSets {
+		if _, ok := v.(Set); ok {
+			return true
+		}
+	}
+	if vis.params.SkipRefCallHead {
+		switch v := v.(type) {
+		case *Expr:
+			if terms, ok := v.Terms.([]*Term); ok {
+				termSlice := terms[0].Value.(Ref)[1:]
+				for i := range termSlice {
+					vis.Walk(termSlice[i])
+				}
+				for i := 1; i < len(terms); i++ {
+					vis.Walk(terms[i])
+				}
+				for i := range v.With {
+					vis.Walk(v.With[i])
+				}
+				return true
+			}
+		case Call:
+			operator := v[0].Value.(Ref)
+			for i := 1; i < len(operator); i++ {
+				vis.Walk(operator[i])
+			}
+			for i := 1; i < len(v); i++ {
+				vis.Walk(v[i])
+			}
+			return true
+		case *With:
+			if ref, ok := v.Target.Value.(Ref); ok {
+				refSlice := ref[1:]
+				for i := range refSlice {
+					vis.Walk(refSlice[i])
+				}
+			}
+			if ref, ok := v.Value.Value.(Ref); ok {
+				refSlice := ref[1:]
+				for i := range refSlice {
+					vis.Walk(refSlice[i])
+				}
+			} else {
+				vis.Walk(v.Value)
+			}
+			return true
+		}
+	}
+	if v, ok := v.(Var); ok {
+		vis.vars.Add(v)
+	}
+	return false
+}
+
+// Walk iterates the AST by calling the visitor's visit method on each
+// node before recursing. Contrary to the generic Walk, this does not
+// require allocating the visitor on the heap.
+func (vis *VarVisitor) Walk(x interface{}) { + if vis.visit(x) { + return + } + + switch x := x.(type) { + case *Module: + vis.Walk(x.Package) + for i := range x.Imports { + vis.Walk(x.Imports[i]) + } + for i := range x.Rules { + vis.Walk(x.Rules[i]) + } + for i := range x.Comments { + vis.Walk(x.Comments[i]) + } + case *Package: + vis.Walk(x.Path) + case *Import: + vis.Walk(x.Path) + vis.Walk(x.Alias) + case *Rule: + vis.Walk(x.Head) + vis.Walk(x.Body) + if x.Else != nil { + vis.Walk(x.Else) + } + case *Head: + if len(x.Reference) > 0 { + vis.Walk(x.Reference) + } else { + vis.Walk(x.Name) + if x.Key != nil { + vis.Walk(x.Key) + } + } + vis.Walk(x.Args) + + if x.Value != nil { + vis.Walk(x.Value) + } + case Body: + for i := range x { + vis.Walk(x[i]) + } + case Args: + for i := range x { + vis.Walk(x[i]) + } + case *Expr: + switch ts := x.Terms.(type) { + case *Term, *SomeDecl, *Every: + vis.Walk(ts) + case []*Term: + for i := range ts { + vis.Walk(ts[i]) + } + } + for i := range x.With { + vis.Walk(x.With[i]) + } + case *With: + vis.Walk(x.Target) + vis.Walk(x.Value) + case *Term: + vis.Walk(x.Value) + case Ref: + for i := range x { + vis.Walk(x[i]) + } + case *object: + x.Foreach(func(k, _ *Term) { + vis.Walk(k) + vis.Walk(x.Get(k)) + }) + case *Array: + x.Foreach(func(t *Term) { + vis.Walk(t) + }) + case Set: + xSlice := x.Slice() + for i := range xSlice { + vis.Walk(xSlice[i]) + } + case *ArrayComprehension: + vis.Walk(x.Term) + vis.Walk(x.Body) + case *ObjectComprehension: + vis.Walk(x.Key) + vis.Walk(x.Value) + vis.Walk(x.Body) + case *SetComprehension: + vis.Walk(x.Term) + vis.Walk(x.Body) + case Call: + for i := range x { + vis.Walk(x[i]) + } + case *Every: + if x.Key != nil { + vis.Walk(x.Key) + } + vis.Walk(x.Value) + vis.Walk(x.Domain) + vis.Walk(x.Body) + case *SomeDecl: + for i := range x.Symbols { + vis.Walk(x.Symbols[i]) + } + } +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/bundle.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/bundle.go new file mode 100644 index 00000000000..d6bf846fc4b --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/bundle.go @@ -0,0 +1,1782 @@ +// Copyright 2018 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package bundle implements bundle loading. +package bundle + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "net/url" + "os" + "path" + "path/filepath" + "reflect" + "strings" + + "github.com/gobwas/glob" + "github.com/open-policy-agent/opa/internal/file/archive" + "github.com/open-policy-agent/opa/internal/merge" + "github.com/open-policy-agent/opa/v1/ast" + astJSON "github.com/open-policy-agent/opa/v1/ast/json" + "github.com/open-policy-agent/opa/v1/format" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/util" +) + +// Common file extensions and file names. +const ( + RegoExt = ".rego" + WasmFile = "policy.wasm" + PlanFile = "plan.json" + ManifestExt = ".manifest" + SignaturesFile = "signatures.json" + patchFile = "patch.json" + dataFile = "data.json" + yamlDataFile = "data.yaml" + ymlDataFile = "data.yml" + defaultHashingAlg = "SHA-256" + DefaultSizeLimitBytes = (1024 * 1024 * 1024) // limit bundle reads to 1GB to protect against gzip bombs + DeltaBundleType = "delta" + SnapshotBundleType = "snapshot" +) + +// Bundle represents a loaded bundle. The bundle can contain data and policies. 
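+//
+// A small in-memory bundle can be assembled directly (illustrative sketch;
+// field values are placeholders):
+//
+//	b := Bundle{
+//		Manifest: Manifest{Revision: "example-rev"},
+//		Data: map[string]interface{}{
+//			"example": map[string]interface{}{"enabled": true},
+//		},
+//	}
+//	b.Manifest.Init()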
+type Bundle struct {
+	Signatures  SignaturesConfig
+	Manifest    Manifest
+	Data        map[string]interface{}
+	Modules     []ModuleFile
+	Wasm        []byte // Deprecated. Use WasmModules instead
+	WasmModules []WasmModuleFile
+	PlanModules []PlanModuleFile
+	Patch       Patch
+	Etag        string
+	Raw         []Raw
+
+	lazyLoadingMode bool
+	sizeLimitBytes  int64
+}
+
+// Raw contains raw bytes representing the bundle's content
+type Raw struct {
+	Path   string
+	Value  []byte
+	module *ModuleFile
+}
+
+// Patch contains an array of objects wherein each object represents the patch operation to be
+// applied to the bundle data.
+type Patch struct {
+	Data []PatchOperation `json:"data,omitempty"`
+}
+
+// PatchOperation models a single patch operation against a document.
+type PatchOperation struct {
+	Op    string      `json:"op"`
+	Path  string      `json:"path"`
+	Value interface{} `json:"value"`
+}
+
+// SignaturesConfig represents an array of JWTs that encapsulate the signatures for the bundle.
+type SignaturesConfig struct {
+	Signatures []string `json:"signatures,omitempty"`
+	Plugin     string   `json:"plugin,omitempty"`
+}
+
+// isEmpty returns true if the SignaturesConfig is empty.
+func (s SignaturesConfig) isEmpty() bool {
+	return reflect.DeepEqual(s, SignaturesConfig{})
+}
+
+// DecodedSignature represents the decoded JWT payload.
+type DecodedSignature struct {
+	Files    []FileInfo `json:"files"`
+	KeyID    string     `json:"keyid"` // Deprecated, use kid in the JWT header instead.
+	Scope    string     `json:"scope"`
+	IssuedAt int64      `json:"iat"`
+	Issuer   string     `json:"iss"`
+}
+
+// FileInfo contains the hashing algorithm used, the resulting digest, etc.
+type FileInfo struct {
+	Name      string `json:"name"`
+	Hash      string `json:"hash"`
+	Algorithm string `json:"algorithm"`
+}
+
+// NewFile returns a new FileInfo.
+func NewFile(name, hash, alg string) FileInfo {
+	return FileInfo{
+		Name:      name,
+		Hash:      hash,
+		Algorithm: alg,
+	}
+}
+
+// Manifest represents the manifest from a bundle. The manifest may contain
+// metadata such as the bundle revision.
+type Manifest struct {
+	Revision      string         `json:"revision"`
+	Roots         *[]string      `json:"roots,omitempty"`
+	WasmResolvers []WasmResolver `json:"wasm,omitempty"`
+	// RegoVersion is the global Rego version for the bundle described by this Manifest.
+	// The Rego version of individual files can be overridden in FileRegoVersions.
+	// We don't use ast.RegoVersion here, as this iota type's order isn't guaranteed to be stable over time.
+	// We use a pointer so that we can appropriately support hand-made bundles that don't have an explicit version.
+	// E.g. in OPA 0.x, if --v1-compatible is used when consuming the bundle and there is no specified version,
+	// we should default to v1; if --v1-compatible isn't used, we should default to v0. In OPA 1.0, no --x-compatible
+	// flag and no explicit bundle version should default to v1.
+	RegoVersion *int `json:"rego_version,omitempty"`
+	// FileRegoVersions is a map from file paths to Rego versions.
+	// This allows individual files to override the global Rego version specified by RegoVersion.
+	FileRegoVersions map[string]int         `json:"file_rego_versions,omitempty"`
+	Metadata         map[string]interface{} `json:"metadata,omitempty"`
+
+	compiledFileRegoVersions []fileRegoVersion
+}
+
+type fileRegoVersion struct {
+	path    glob.Glob
+	version int
+}
+
+// WasmResolver maps a wasm module to an entrypoint ref.
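+//
+// In manifest JSON this corresponds to entries such as (illustrative values):
+//
+//	{"wasm": [{"entrypoint": "example/allow", "module": "/policy.wasm"}]}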
+type WasmResolver struct { + Entrypoint string `json:"entrypoint,omitempty"` + Module string `json:"module,omitempty"` + Annotations []*ast.Annotations `json:"annotations,omitempty"` +} + +// Init initializes the manifest. If you instantiate a manifest +// manually, call Init to ensure that the roots are set properly. +func (m *Manifest) Init() { + if m.Roots == nil { + defaultRoots := []string{""} + m.Roots = &defaultRoots + } +} + +// AddRoot adds r to the roots of m. This function is idempotent. +func (m *Manifest) AddRoot(r string) { + m.Init() + if !RootPathsContain(*m.Roots, r) { + *m.Roots = append(*m.Roots, r) + } +} + +func (m *Manifest) SetRegoVersion(v ast.RegoVersion) { + m.Init() + regoVersion := 0 + if v == ast.RegoV1 { + regoVersion = 1 + } + m.RegoVersion = ®oVersion +} + +// Equal returns true if m is semantically equivalent to other. +func (m Manifest) Equal(other Manifest) bool { + + // This is safe since both are passed by value. + m.Init() + other.Init() + + if m.Revision != other.Revision { + return false + } + + if m.RegoVersion == nil && other.RegoVersion != nil { + return false + } + if m.RegoVersion != nil && other.RegoVersion == nil { + return false + } + if m.RegoVersion != nil && other.RegoVersion != nil && *m.RegoVersion != *other.RegoVersion { + return false + } + + // If both are nil, or both are empty, we consider them equal. + if !(len(m.FileRegoVersions) == 0 && len(other.FileRegoVersions) == 0) && + !reflect.DeepEqual(m.FileRegoVersions, other.FileRegoVersions) { + return false + } + + if !reflect.DeepEqual(m.Metadata, other.Metadata) { + return false + } + + return m.equalWasmResolversAndRoots(other) +} + +func (m Manifest) Empty() bool { + return m.Equal(Manifest{}) +} + +// Copy returns a deep copy of the manifest. 
+func (m Manifest) Copy() Manifest {
+	m.Init()
+	roots := make([]string, len(*m.Roots))
+	copy(roots, *m.Roots)
+	m.Roots = &roots
+
+	wasmModules := make([]WasmResolver, len(m.WasmResolvers))
+	copy(wasmModules, m.WasmResolvers)
+	m.WasmResolvers = wasmModules
+
+	metadata := m.Metadata
+
+	if metadata != nil {
+		m.Metadata = make(map[string]interface{})
+		for k, v := range metadata {
+			m.Metadata[k] = v
+		}
+	}
+
+	return m
+}
+
+func (m Manifest) String() string {
+	m.Init()
+	if m.RegoVersion != nil {
+		return fmt.Sprintf("<revision: %q, rego_version: %d, roots: %v, wasm: %+v, metadata: %+v>",
+			m.Revision, *m.RegoVersion, *m.Roots, m.WasmResolvers, m.Metadata)
+	}
+	return fmt.Sprintf("<revision: %q, roots: %v, wasm: %+v, metadata: %+v>",
+		m.Revision, *m.Roots, m.WasmResolvers, m.Metadata)
+}
+
+func (m Manifest) rootSet() stringSet {
+	rs := map[string]struct{}{}
+
+	for _, r := range *m.Roots {
+		rs[r] = struct{}{}
+	}
+
+	return stringSet(rs)
+}
+
+func (m Manifest) equalWasmResolversAndRoots(other Manifest) bool {
+	if len(m.WasmResolvers) != len(other.WasmResolvers) {
+		return false
+	}
+
+	for i := range len(m.WasmResolvers) {
+		if !m.WasmResolvers[i].Equal(&other.WasmResolvers[i]) {
+			return false
+		}
+	}
+
+	return m.rootSet().Equal(other.rootSet())
+}
+
+func (wr *WasmResolver) Equal(other *WasmResolver) bool {
+	if wr == nil && other == nil {
+		return true
+	}
+
+	if wr == nil || other == nil {
+		return false
+	}
+
+	if wr.Module != other.Module {
+		return false
+	}
+
+	if wr.Entrypoint != other.Entrypoint {
+		return false
+	}
+
+	annotLen := len(wr.Annotations)
+	if annotLen != len(other.Annotations) {
+		return false
+	}
+
+	for i := range annotLen {
+		if wr.Annotations[i].Compare(other.Annotations[i]) != 0 {
+			return false
+		}
+	}
+
+	return true
+}
+
+type stringSet map[string]struct{}
+
+func (ss stringSet) Equal(other stringSet) bool {
+	if len(ss) != len(other) {
+		return false
+	}
+	for k := range other {
+		if _, ok := ss[k]; !ok {
+			return false
+		}
+	}
+	return true
+}
+
+func (m *Manifest) validateAndInjectDefaults(b Bundle) error {
+
+	m.Init()
+
+	// Validate roots in bundle.
+	roots := *m.Roots
+
+	// Standardize the roots (no leading or trailing slash)
+	for i := range roots {
+		roots[i] = strings.Trim(roots[i], "/")
+	}
+
+	for i := range len(roots) - 1 {
+		for j := i + 1; j < len(roots); j++ {
+			if RootPathsOverlap(roots[i], roots[j]) {
+				return fmt.Errorf("manifest has overlapped roots: '%v' and '%v'", roots[i], roots[j])
+			}
+		}
+	}
+
+	// Validate modules in bundle.
+	for _, module := range b.Modules {
+		found := false
+		if path, err := module.Parsed.Package.Path.Ptr(); err == nil {
+			found = RootPathsContain(roots, path)
+		}
+		if !found {
+			return fmt.Errorf("manifest roots %v do not permit '%v' in module '%v'", roots, module.Parsed.Package, module.Path)
+		}
+	}
+
+	// Build a set of wasm module entrypoints to validate
+	wasmModuleToEps := map[string]string{}
+	seenEps := map[string]struct{}{}
+	for _, wm := range b.WasmModules {
+		wasmModuleToEps[wm.Path] = ""
+	}
+
+	for _, wmConfig := range b.Manifest.WasmResolvers {
+		_, ok := wasmModuleToEps[wmConfig.Module]
+		if !ok {
+			return fmt.Errorf("manifest references wasm module '%s' but the module file does not exist", wmConfig.Module)
+		}
+
+		// Ensure the wasm module entrypoint is within the bundle roots
+		if !RootPathsContain(roots, wmConfig.Entrypoint) {
+			return fmt.Errorf("manifest roots %v do not permit '%v' entrypoint for wasm module '%v'", roots, wmConfig.Entrypoint, wmConfig.Module)
+		}
+
+		if _, ok := seenEps[wmConfig.Entrypoint]; ok {
+			return fmt.Errorf("entrypoint '%s' cannot be used by more than one wasm module", wmConfig.Entrypoint)
+		}
+		seenEps[wmConfig.Entrypoint] = struct{}{}
+
+		wasmModuleToEps[wmConfig.Module] = wmConfig.Entrypoint
+	}
+
+	// Validate data patches in bundle.
+	for _, patch := range b.Patch.Data {
+		path := strings.Trim(patch.Path, "/")
+		if !RootPathsContain(roots, path) {
+			return fmt.Errorf("manifest roots %v do not permit data patch at path '%s'", roots, path)
+		}
+	}
+
+	if b.lazyLoadingMode {
+		return nil
+	}
+
+	// Validate data in bundle.
+	return dfs(b.Data, "", func(path string, node interface{}) (bool, error) {
+		path = strings.Trim(path, "/")
+		if RootPathsContain(roots, path) {
+			return true, nil
+		}
+
+		if _, ok := node.(map[string]interface{}); ok {
+			for i := range roots {
+				if RootPathsContain(strings.Split(path, "/"), roots[i]) {
+					return false, nil
+				}
+			}
+		}
+		return false, fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, path)
+	})
+}
+
+// ModuleFile represents a single module contained in a bundle.
+type ModuleFile struct {
+	URL          string
+	Path         string
+	RelativePath string
+	Raw          []byte
+	Parsed       *ast.Module
+}
+
+// WasmModuleFile represents a single wasm module contained in a bundle.
+type WasmModuleFile struct {
+	URL         string
+	Path        string
+	Entrypoints []ast.Ref
+	Raw         []byte
+}
+
+// PlanModuleFile represents a single plan module contained in a bundle.
+//
+// NOTE(tsandall): currently the plans are just opaque binary blobs. In the
+// future we could inject the entrypoints so that the plans could be executed
+// inside of OPA proper like we do for Wasm modules.
+type PlanModuleFile struct {
+	URL  string
+	Path string
+	Raw  []byte
+}
+
+// Reader contains the reader to load the bundle from.
+type Reader struct {
+	loader                DirectoryLoader
+	includeManifestInData bool
+	metrics               metrics.Metrics
+	baseDir               string
+	verificationConfig    *VerificationConfig
+	skipVerify            bool
+	processAnnotations    bool
+	capabilities          *ast.Capabilities
+	files                 map[string]FileInfo // files in the bundle signature payload
+	sizeLimitBytes        int64
+	etag                  string
+	lazyLoadingMode       bool
+	name                  string
+	persist               bool
+	regoVersion           ast.RegoVersion
+	followSymlinks        bool
+}
+
+// NewReader is deprecated. Use NewCustomReader instead.
+func NewReader(r io.Reader) *Reader {
+	return NewCustomReader(NewTarballLoader(r))
+}
+
+// NewCustomReader returns a new Reader configured to use the
+// specified DirectoryLoader.
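+//
+// For example, to read a bundle from a gzipped tarball on disk (sketch;
+// the path is illustrative and error handling is elided):
+//
+//	f, _ := os.Open("bundle.tar.gz")
+//	defer f.Close()
+//	b, err := NewCustomReader(NewTarballLoaderWithBaseURL(f, "")).Read()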
+func NewCustomReader(loader DirectoryLoader) *Reader { + nr := Reader{ + loader: loader, + metrics: metrics.New(), + files: make(map[string]FileInfo), + sizeLimitBytes: DefaultSizeLimitBytes + 1, + } + return &nr +} + +// IncludeManifestInData sets whether the manifest metadata should be +// included in the bundle's data. +func (r *Reader) IncludeManifestInData(includeManifestInData bool) *Reader { + r.includeManifestInData = includeManifestInData + return r +} + +// WithMetrics sets the metrics object to be used while loading bundles +func (r *Reader) WithMetrics(m metrics.Metrics) *Reader { + r.metrics = m + return r +} + +// WithBaseDir sets a base directory for file paths of loaded Rego +// modules. This will *NOT* affect the loaded path of data files. +func (r *Reader) WithBaseDir(dir string) *Reader { + r.baseDir = dir + return r +} + +// WithBundleVerificationConfig sets the key configuration used to verify a signed bundle +func (r *Reader) WithBundleVerificationConfig(config *VerificationConfig) *Reader { + r.verificationConfig = config + return r +} + +// WithSkipBundleVerification skips verification of a signed bundle +func (r *Reader) WithSkipBundleVerification(skipVerify bool) *Reader { + r.skipVerify = skipVerify + return r +} + +// WithProcessAnnotations enables annotation processing during .rego file parsing. +func (r *Reader) WithProcessAnnotations(yes bool) *Reader { + r.processAnnotations = yes + return r +} + +// WithCapabilities sets the supported capabilities when loading the files +func (r *Reader) WithCapabilities(caps *ast.Capabilities) *Reader { + r.capabilities = caps + return r +} + +// WithJSONOptions sets the JSON options on the parser (now a no-op). +// +// Deprecated: Use SetOptions in the json package instead, where a longer description +// of why this is deprecated also can be found. +func (r *Reader) WithJSONOptions(*astJSON.Options) *Reader { + return r +} + +// WithSizeLimitBytes sets the size limit to apply to files in the bundle. If files are larger +// than this, an error will be returned by the reader. +func (r *Reader) WithSizeLimitBytes(n int64) *Reader { + r.sizeLimitBytes = n + 1 + return r +} + +// WithBundleEtag sets the given etag value on the bundle +func (r *Reader) WithBundleEtag(etag string) *Reader { + r.etag = etag + return r +} + +// WithBundleName specifies the bundle name +func (r *Reader) WithBundleName(name string) *Reader { + r.name = name + return r +} + +func (r *Reader) WithFollowSymlinks(yes bool) *Reader { + r.followSymlinks = yes + return r +} + +// WithLazyLoadingMode sets the bundle loading mode. If true, +// bundles will be read in lazy mode. In this mode, data files in the bundle will not be +// deserialized and the check to validate that the bundle data does not contain paths +// outside the bundle's roots will not be performed while reading the bundle. +func (r *Reader) WithLazyLoadingMode(yes bool) *Reader { + r.lazyLoadingMode = yes + return r +} + +// WithBundlePersistence specifies if the downloaded bundle will eventually be persisted to disk. +func (r *Reader) WithBundlePersistence(persist bool) *Reader { + r.persist = persist + return r +} + +func (r *Reader) WithRegoVersion(version ast.RegoVersion) *Reader { + r.regoVersion = version + return r +} + +func (r *Reader) ParserOptions() ast.ParserOptions { + return ast.ParserOptions{ + ProcessAnnotation: r.processAnnotations, + Capabilities: r.capabilities, + RegoVersion: r.regoVersion, + } +} + +// Read returns a new Bundle loaded from the reader. 
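+//
+// Reader options are chained before the final Read call, e.g. (sketch,
+// assuming loader is any DirectoryLoader):
+//
+//	b, err := NewCustomReader(loader).
+//		WithProcessAnnotations(true).
+//		WithSizeLimitBytes(DefaultSizeLimitBytes).
+//		Read()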
+func (r *Reader) Read() (Bundle, error) { + + var bundle Bundle + var descriptors []*Descriptor + var err error + var raw []Raw + + bundle.Signatures, bundle.Patch, descriptors, err = preProcessBundle(r.loader, r.skipVerify, r.sizeLimitBytes) + if err != nil { + return bundle, err + } + + bundle.lazyLoadingMode = r.lazyLoadingMode + bundle.sizeLimitBytes = r.sizeLimitBytes + + if bundle.Type() == SnapshotBundleType { + err = r.checkSignaturesAndDescriptors(bundle.Signatures) + if err != nil { + return bundle, err + } + + bundle.Data = map[string]interface{}{} + } + + var modules []ModuleFile + for _, f := range descriptors { + buf, err := readFile(f, r.sizeLimitBytes) + if err != nil { + return bundle, err + } + + // verify the file content + if bundle.Type() == SnapshotBundleType && !bundle.Signatures.isEmpty() { + path := f.Path() + if r.baseDir != "" { + path = f.URL() + } + path = strings.TrimPrefix(path, "/") + + // check if the file is to be excluded from bundle verification + if r.isFileExcluded(path) { + delete(r.files, path) + } else { + if err = r.verifyBundleFile(path, buf); err != nil { + return bundle, err + } + } + } + + // Normalize the paths to use `/` separators + path := filepath.ToSlash(f.Path()) + + if strings.HasSuffix(path, RegoExt) { + fullPath := r.fullPath(path) + bs := buf.Bytes() + + // Modules are parsed after we've had a chance to read the manifest + mf := ModuleFile{ + URL: f.URL(), + Path: fullPath, + RelativePath: path, + Raw: bs, + } + modules = append(modules, mf) + + if r.lazyLoadingMode { + p := fullPath + if r.name != "" { + p = modulePathWithPrefix(r.name, fullPath) + } + + raw = append(raw, Raw{Path: p, Value: bs, module: &mf}) + } + } else if filepath.Base(path) == WasmFile { + bundle.WasmModules = append(bundle.WasmModules, WasmModuleFile{ + URL: f.URL(), + Path: r.fullPath(path), + Raw: buf.Bytes(), + }) + } else if filepath.Base(path) == PlanFile { + bundle.PlanModules = append(bundle.PlanModules, PlanModuleFile{ + URL: f.URL(), + Path: r.fullPath(path), + Raw: buf.Bytes(), + }) + } else if filepath.Base(path) == dataFile { + if r.lazyLoadingMode { + raw = append(raw, Raw{Path: path, Value: buf.Bytes()}) + continue + } + + var value interface{} + + r.metrics.Timer(metrics.RegoDataParse).Start() + err := util.UnmarshalJSON(buf.Bytes(), &value) + r.metrics.Timer(metrics.RegoDataParse).Stop() + + if err != nil { + return bundle, fmt.Errorf("bundle load failed on %v: %w", r.fullPath(path), err) + } + + if err := insertValue(&bundle, path, value); err != nil { + return bundle, err + } + + } else if filepath.Base(path) == yamlDataFile || filepath.Base(path) == ymlDataFile { + if r.lazyLoadingMode { + raw = append(raw, Raw{Path: path, Value: buf.Bytes()}) + continue + } + + var value interface{} + + r.metrics.Timer(metrics.RegoDataParse).Start() + err := util.Unmarshal(buf.Bytes(), &value) + r.metrics.Timer(metrics.RegoDataParse).Stop() + + if err != nil { + return bundle, fmt.Errorf("bundle load failed on %v: %w", r.fullPath(path), err) + } + + if err := insertValue(&bundle, path, value); err != nil { + return bundle, err + } + + } else if strings.HasSuffix(path, ManifestExt) { + if err := util.NewJSONDecoder(&buf).Decode(&bundle.Manifest); err != nil { + return bundle, fmt.Errorf("bundle load failed on manifest decode: %w", err) + } + } + } + + // Parse modules + popts := r.ParserOptions() + popts.RegoVersion = bundle.RegoVersion(popts.EffectiveRegoVersion()) + for _, mf := range modules { + modulePopts := popts + if regoVersion, err := 
bundle.RegoVersionForFile(mf.RelativePath, popts.EffectiveRegoVersion()); err != nil { + return bundle, err + } else if regoVersion != ast.RegoUndefined { + // We don't expect ast.RegoUndefined here, but don't override configured rego-version if we do just to be extra protective + modulePopts.RegoVersion = regoVersion + } + r.metrics.Timer(metrics.RegoModuleParse).Start() + mf.Parsed, err = ast.ParseModuleWithOpts(mf.Path, string(mf.Raw), modulePopts) + r.metrics.Timer(metrics.RegoModuleParse).Stop() + if err != nil { + return bundle, err + } + bundle.Modules = append(bundle.Modules, mf) + } + + if bundle.Type() == DeltaBundleType { + if len(bundle.Data) != 0 { + return bundle, errors.New("delta bundle expected to contain only patch file but data files found") + } + + if len(bundle.Modules) != 0 { + return bundle, errors.New("delta bundle expected to contain only patch file but policy files found") + } + + if len(bundle.WasmModules) != 0 { + return bundle, errors.New("delta bundle expected to contain only patch file but wasm files found") + } + + if r.persist { + return bundle, errors.New("'persist' property is true in config. persisting delta bundle to disk is not supported") + } + } + + // check if the bundle signatures specify any files that weren't found in the bundle + if bundle.Type() == SnapshotBundleType && len(r.files) != 0 { + extra := []string{} + for k := range r.files { + extra = append(extra, k) + } + return bundle, fmt.Errorf("file(s) %v specified in bundle signatures but not found in the target bundle", extra) + } + + if err := bundle.Manifest.validateAndInjectDefaults(bundle); err != nil { + return bundle, err + } + + // Inject the wasm module entrypoint refs into the WasmModuleFile structs + epMap := map[string][]string{} + for _, r := range bundle.Manifest.WasmResolvers { + epMap[r.Module] = append(epMap[r.Module], r.Entrypoint) + } + for i := range len(bundle.WasmModules) { + entrypoints := epMap[bundle.WasmModules[i].Path] + for _, entrypoint := range entrypoints { + ref, err := ast.PtrRef(ast.DefaultRootDocument, entrypoint) + if err != nil { + return bundle, fmt.Errorf("failed to parse wasm module entrypoint '%s': %s", entrypoint, err) + } + bundle.WasmModules[i].Entrypoints = append(bundle.WasmModules[i].Entrypoints, ref) + } + } + + if r.includeManifestInData { + var metadata map[string]interface{} + + b, err := json.Marshal(&bundle.Manifest) + if err != nil { + return bundle, fmt.Errorf("bundle load failed on manifest marshal: %w", err) + } + + err = util.UnmarshalJSON(b, &metadata) + if err != nil { + return bundle, fmt.Errorf("bundle load failed on manifest unmarshal: %w", err) + } + + // For backwards compatibility always write to the old unnamed manifest path + // This will *not* be correct if >1 bundle is in use... 
+ if err := bundle.insertData(legacyManifestStoragePath, metadata); err != nil { + return bundle, fmt.Errorf("bundle load failed on %v: %w", legacyRevisionStoragePath, err) + } + } + + bundle.Etag = r.etag + bundle.Raw = raw + + return bundle, nil +} + +func (r *Reader) isFileExcluded(path string) bool { + for _, e := range r.verificationConfig.Exclude { + match, _ := filepath.Match(e, path) + if match { + return true + } + } + return false +} + +func (r *Reader) checkSignaturesAndDescriptors(signatures SignaturesConfig) error { + if r.skipVerify { + return nil + } + + if signatures.isEmpty() && r.verificationConfig != nil && r.verificationConfig.KeyID != "" { + return errors.New("bundle missing .signatures.json file") + } + + if !signatures.isEmpty() { + if r.verificationConfig == nil { + return errors.New("verification key not provided") + } + + // verify the JWT signatures included in the `.signatures.json` file + if err := r.verifyBundleSignature(signatures); err != nil { + return err + } + } + return nil +} + +func (r *Reader) verifyBundleSignature(sc SignaturesConfig) error { + var err error + r.files, err = VerifyBundleSignature(sc, r.verificationConfig) + return err +} + +func (r *Reader) verifyBundleFile(path string, data bytes.Buffer) error { + return VerifyBundleFile(path, data, r.files) +} + +func (r *Reader) fullPath(path string) string { + if r.baseDir != "" { + path = filepath.Join(r.baseDir, path) + } + return path +} + +// Write is deprecated. Use NewWriter instead. +func Write(w io.Writer, bundle Bundle) error { + return NewWriter(w). + UseModulePath(true). + DisableFormat(true). + Write(bundle) +} + +// Writer implements bundle serialization. +type Writer struct { + usePath bool + disableFormat bool + w io.Writer +} + +// NewWriter returns a bundle writer that writes to w. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + } +} + +// UseModulePath configures the writer to use the module file path instead of the +// module file URL during serialization. This is for backwards compatibility. +func (w *Writer) UseModulePath(yes bool) *Writer { + w.usePath = yes + return w +} + +// DisableFormat configures the writer to just write out raw bytes instead +// of formatting modules before serialization. +func (w *Writer) DisableFormat(yes bool) *Writer { + w.disableFormat = yes + return w +} + +// Write writes the bundle to the writer's output stream. 
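+//
+// For example, to serialize a bundle into an in-memory gzipped tarball
+// (sketch, assuming b is a previously loaded Bundle):
+//
+//	var buf bytes.Buffer
+//	err := NewWriter(&buf).UseModulePath(true).Write(b)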
+func (w *Writer) Write(bundle Bundle) error { + gw := gzip.NewWriter(w.w) + tw := tar.NewWriter(gw) + + bundleType := bundle.Type() + + if bundleType == SnapshotBundleType { + var buf bytes.Buffer + + if err := json.NewEncoder(&buf).Encode(bundle.Data); err != nil { + return err + } + + if err := archive.WriteFile(tw, "data.json", buf.Bytes()); err != nil { + return err + } + + for _, module := range bundle.Modules { + path := module.URL + if w.usePath { + path = module.Path + } + + if err := archive.WriteFile(tw, path, module.Raw); err != nil { + return err + } + } + + if err := w.writeWasm(tw, bundle); err != nil { + return err + } + + if err := writeSignatures(tw, bundle); err != nil { + return err + } + + if err := w.writePlan(tw, bundle); err != nil { + return err + } + } else if bundleType == DeltaBundleType { + if err := writePatch(tw, bundle); err != nil { + return err + } + } + + if err := writeManifest(tw, bundle); err != nil { + return err + } + + if err := tw.Close(); err != nil { + return err + } + + return gw.Close() +} + +func (w *Writer) writeWasm(tw *tar.Writer, bundle Bundle) error { + for _, wm := range bundle.WasmModules { + path := wm.URL + if w.usePath { + path = wm.Path + } + + err := archive.WriteFile(tw, path, wm.Raw) + if err != nil { + return err + } + } + + if len(bundle.Wasm) > 0 { + err := archive.WriteFile(tw, "/"+WasmFile, bundle.Wasm) + if err != nil { + return err + } + } + + return nil +} + +func (w *Writer) writePlan(tw *tar.Writer, bundle Bundle) error { + for _, wm := range bundle.PlanModules { + path := wm.URL + if w.usePath { + path = wm.Path + } + + err := archive.WriteFile(tw, path, wm.Raw) + if err != nil { + return err + } + } + + return nil +} + +func writeManifest(tw *tar.Writer, bundle Bundle) error { + + if bundle.Manifest.Empty() { + return nil + } + + var buf bytes.Buffer + + if err := json.NewEncoder(&buf).Encode(bundle.Manifest); err != nil { + return err + } + + return archive.WriteFile(tw, ManifestExt, buf.Bytes()) +} + +func writePatch(tw *tar.Writer, bundle Bundle) error { + + var buf bytes.Buffer + + if err := json.NewEncoder(&buf).Encode(bundle.Patch); err != nil { + return err + } + + return archive.WriteFile(tw, patchFile, buf.Bytes()) +} + +func writeSignatures(tw *tar.Writer, bundle Bundle) error { + + if bundle.Signatures.isEmpty() { + return nil + } + + bs, err := json.MarshalIndent(bundle.Signatures, "", " ") + if err != nil { + return err + } + + return archive.WriteFile(tw, fmt.Sprintf(".%v", SignaturesFile), bs) +} + +func hashBundleFiles(hash SignatureHasher, b *Bundle) ([]FileInfo, error) { + + files := []FileInfo{} + + bs, err := hash.HashFile(b.Data) + if err != nil { + return files, err + } + files = append(files, NewFile(strings.TrimPrefix("data.json", "/"), hex.EncodeToString(bs), defaultHashingAlg)) + + if len(b.Wasm) != 0 { + bs, err := hash.HashFile(b.Wasm) + if err != nil { + return files, err + } + files = append(files, NewFile(strings.TrimPrefix(WasmFile, "/"), hex.EncodeToString(bs), defaultHashingAlg)) + } + + for _, wasmModule := range b.WasmModules { + bs, err := hash.HashFile(wasmModule.Raw) + if err != nil { + return files, err + } + files = append(files, NewFile(strings.TrimPrefix(wasmModule.Path, "/"), hex.EncodeToString(bs), defaultHashingAlg)) + } + + for _, planmodule := range b.PlanModules { + bs, err := hash.HashFile(planmodule.Raw) + if err != nil { + return files, err + } + files = append(files, NewFile(strings.TrimPrefix(planmodule.Path, "/"), hex.EncodeToString(bs), defaultHashingAlg)) + } + 
+	// If the manifest is essentially empty, don't add it to the signatures since it
+	// won't be written to the bundle. Otherwise:
+	// parse the manifest into a JSON structure;
+	// then recursively order the fields of all objects alphabetically and then apply
+	// the hash function to the result to compute the hash.
+	if !b.Manifest.Empty() {
+		mbs, err := json.Marshal(b.Manifest)
+		if err != nil {
+			return files, err
+		}
+
+		var result map[string]interface{}
+		if err := util.Unmarshal(mbs, &result); err != nil {
+			return files, err
+		}
+
+		bs, err = hash.HashFile(result)
+		if err != nil {
+			return files, err
+		}
+
+		files = append(files, NewFile(strings.TrimPrefix(ManifestExt, "/"), hex.EncodeToString(bs), defaultHashingAlg))
+	}
+
+	return files, err
+}
+
+// FormatModules formats Rego modules.
+// Modules will be formatted to comply with [ast.DefaultRegoVersion], but the Rego compatibility of individual parsed modules will be respected (e.g. if 'rego.v1' is imported).
+func (b *Bundle) FormatModules(useModulePath bool) error {
+	return b.FormatModulesForRegoVersion(ast.DefaultRegoVersion, true, useModulePath)
+}
+
+// FormatModulesForRegoVersion formats Rego modules to comply with a given Rego version.
+func (b *Bundle) FormatModulesForRegoVersion(version ast.RegoVersion, preserveModuleRegoVersion bool, useModulePath bool) error {
+	var err error
+
+	for i, module := range b.Modules {
+		opts := format.Opts{}
+		if preserveModuleRegoVersion {
+			opts.RegoVersion = module.Parsed.RegoVersion()
+			opts.ParserOptions = &ast.ParserOptions{
+				RegoVersion: opts.RegoVersion,
+			}
+		} else {
+			opts.RegoVersion = version
+		}
+
+		if module.Raw == nil {
+			module.Raw, err = format.AstWithOpts(module.Parsed, opts)
+			if err != nil {
+				return err
+			}
+		} else {
+			path := module.URL
+			if useModulePath {
+				path = module.Path
+			}
+
+			module.Raw, err = format.SourceWithOpts(path, module.Raw, opts)
+			if err != nil {
+				return err
+			}
+		}
+		b.Modules[i].Raw = module.Raw
+	}
+	return nil
+}
+
+// GenerateSignature generates the signature for the given bundle.
+func (b *Bundle) GenerateSignature(signingConfig *SigningConfig, keyID string, useModulePath bool) error {
+
+	hash, err := NewSignatureHasher(HashingAlgorithm(defaultHashingAlg))
+	if err != nil {
+		return err
+	}
+
+	files := []FileInfo{}
+
+	for _, module := range b.Modules {
+		bytes, err := hash.HashFile(module.Raw)
+		if err != nil {
+			return err
+		}
+
+		path := module.URL
+		if useModulePath {
+			path = module.Path
+		}
+		files = append(files, NewFile(strings.TrimPrefix(path, "/"), hex.EncodeToString(bytes), defaultHashingAlg))
+	}
+
+	result, err := hashBundleFiles(hash, b)
+	if err != nil {
+		return err
+	}
+	files = append(files, result...)
+
+	// generate signed token
+	token, err := GenerateSignedToken(files, signingConfig, keyID)
+	if err != nil {
+		return err
+	}
+
+	if b.Signatures.isEmpty() {
+		b.Signatures = SignaturesConfig{}
+	}
+
+	if signingConfig.Plugin != "" {
+		b.Signatures.Plugin = signingConfig.Plugin
+	}
+
+	b.Signatures.Signatures = []string{token}
+
+	return nil
+}
+
+// ParsedModules returns a map of parsed modules with names that are
+// unique and human-readable for a given bundle name.
+func (b *Bundle) ParsedModules(bundleName string) map[string]*ast.Module { + + mods := make(map[string]*ast.Module, len(b.Modules)) + + for _, mf := range b.Modules { + mods[modulePathWithPrefix(bundleName, mf.Path)] = mf.Parsed + } + + return mods +} + +func (b *Bundle) RegoVersion(def ast.RegoVersion) ast.RegoVersion { + if v := b.Manifest.RegoVersion; v != nil { + if *v == 0 { + return ast.RegoV0 + } else if *v == 1 { + return ast.RegoV1 + } + } + return def +} + +func (b *Bundle) SetRegoVersion(v ast.RegoVersion) { + b.Manifest.SetRegoVersion(v) +} + +// RegoVersionForFile returns the rego-version for the specified file path. +// If there is no defined version for the given path, the default version def is returned. +// If the version does not correspond to ast.RegoV0 or ast.RegoV1, an error is returned. +func (b *Bundle) RegoVersionForFile(path string, def ast.RegoVersion) (ast.RegoVersion, error) { + version, err := b.Manifest.numericRegoVersionForFile(path) + if err != nil { + return def, err + } else if version == nil { + return def, nil + } else if *version == 0 { + return ast.RegoV0, nil + } else if *version == 1 { + return ast.RegoV1, nil + } + return def, fmt.Errorf("unknown bundle rego-version %d for file '%s'", *version, path) +} + +func (m *Manifest) RegoVersionForFile(path string) (ast.RegoVersion, error) { + v, err := m.numericRegoVersionForFile(path) + if err != nil { + return ast.RegoUndefined, err + } + + if v == nil { + return ast.RegoUndefined, nil + } + + return ast.RegoVersionFromInt(*v), nil +} + +func (m *Manifest) numericRegoVersionForFile(path string) (*int, error) { + var version *int + + if len(m.FileRegoVersions) != len(m.compiledFileRegoVersions) { + m.compiledFileRegoVersions = make([]fileRegoVersion, 0, len(m.FileRegoVersions)) + for pattern, v := range m.FileRegoVersions { + compiled, err := glob.Compile(pattern) + if err != nil { + return nil, fmt.Errorf("failed to compile glob pattern %s: %s", pattern, err) + } + m.compiledFileRegoVersions = append(m.compiledFileRegoVersions, fileRegoVersion{compiled, v}) + } + } + + for _, fv := range m.compiledFileRegoVersions { + if fv.path.Match(path) { + version = &fv.version + break + } + } + + if version == nil { + version = m.RegoVersion + } + return version, nil +} + +// Equal returns true if this bundle's contents equal the other bundle's +// contents. +func (b Bundle) Equal(other Bundle) bool { + if !reflect.DeepEqual(b.Data, other.Data) { + return false + } + + if len(b.Modules) != len(other.Modules) { + return false + } + for i := range b.Modules { + // To support bundles built from rootless filesystems we ignore a "/" prefix + // for URLs and Paths, such that "/file" and "file" are equivalent + if strings.TrimPrefix(b.Modules[i].URL, string(filepath.Separator)) != + strings.TrimPrefix(other.Modules[i].URL, string(filepath.Separator)) { + return false + } + if strings.TrimPrefix(b.Modules[i].Path, string(filepath.Separator)) != + strings.TrimPrefix(other.Modules[i].Path, string(filepath.Separator)) { + return false + } + if !b.Modules[i].Parsed.Equal(other.Modules[i].Parsed) { + return false + } + if !bytes.Equal(b.Modules[i].Raw, other.Modules[i].Raw) { + return false + } + } + if (b.Wasm == nil && other.Wasm != nil) || (b.Wasm != nil && other.Wasm == nil) { + return false + } + + return bytes.Equal(b.Wasm, other.Wasm) +} + +// Copy returns a deep copy of the bundle. +func (b Bundle) Copy() Bundle { + + // Copy data. 
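+	// Round-tripping the data through util.RoundTrip (serialize and re-parse)
+	// yields a copy that shares no nested maps or slices with the original.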
+ var x interface{} = b.Data + + if err := util.RoundTrip(&x); err != nil { + panic(err) + } + + if x != nil { + b.Data = x.(map[string]interface{}) + } + + // Copy modules. + for i := range b.Modules { + bs := make([]byte, len(b.Modules[i].Raw)) + copy(bs, b.Modules[i].Raw) + b.Modules[i].Raw = bs + b.Modules[i].Parsed = b.Modules[i].Parsed.Copy() + } + + // Copy manifest. + b.Manifest = b.Manifest.Copy() + + return b +} + +func (b *Bundle) insertData(key []string, value interface{}) error { + // Build an object with the full structure for the value + obj, err := mktree(key, value) + if err != nil { + return err + } + + // Merge the new data in with the current bundle data object + merged, ok := merge.InterfaceMaps(b.Data, obj) + if !ok { + return fmt.Errorf("failed to insert data file from path %s", filepath.Join(key...)) + } + + b.Data = merged + + return nil +} + +func (b *Bundle) readData(key []string) *interface{} { + + if len(key) == 0 { + if len(b.Data) == 0 { + return nil + } + var result interface{} = b.Data + return &result + } + + node := b.Data + + for i := range len(key) - 1 { + + child, ok := node[key[i]] + if !ok { + return nil + } + + childObj, ok := child.(map[string]interface{}) + if !ok { + return nil + } + + node = childObj + } + + child, ok := node[key[len(key)-1]] + if !ok { + return nil + } + + return &child +} + +// Type returns the type of the bundle. +func (b *Bundle) Type() string { + if len(b.Patch.Data) != 0 { + return DeltaBundleType + } + return SnapshotBundleType +} + +func mktree(path []string, value interface{}) (map[string]interface{}, error) { + if len(path) == 0 { + // For 0 length path the value is the full tree. + obj, ok := value.(map[string]interface{}) + if !ok { + return nil, errors.New("root value must be object") + } + return obj, nil + } + + dir := map[string]interface{}{} + for i := len(path) - 1; i > 0; i-- { + dir[path[i]] = value + value = dir + dir = map[string]interface{}{} + } + dir[path[0]] = value + + return dir, nil +} + +// Merge accepts a set of bundles and merges them into a single result bundle. If there are +// any conflicts during the merge (e.g., with roots) an error is returned. The result bundle +// will have an empty revision except in the special case where a single bundle is provided +// (and in that case the bundle is just returned unmodified.) +func Merge(bundles []*Bundle) (*Bundle, error) { + return MergeWithRegoVersion(bundles, ast.DefaultRegoVersion, false) +} + +// MergeWithRegoVersion creates a merged bundle from the provided bundles, similar to Merge. +// If more than one bundle is provided, the rego version of the result bundle is set to the provided regoVersion. +// Any Rego files in a bundle of conflicting rego version will be marked in the result's manifest with the rego version +// of its original bundle. If the Rego file already had an overriding rego version, it will be preserved. +// If a single bundle is provided, it will retain any rego version information it already had. If it has none, the +// provided regoVersion will be applied to it. +// If usePath is true, per-file rego-versions will be calculated using the file's ModuleFile.Path; otherwise, the file's +// ModuleFile.URL will be used. 
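+//
+// Illustrative sketch (assuming a and b are previously loaded Bundles):
+//
+//	merged, err := MergeWithRegoVersion([]*Bundle{&a, &b}, ast.RegoV1, false)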
+func MergeWithRegoVersion(bundles []*Bundle, regoVersion ast.RegoVersion, usePath bool) (*Bundle, error) { + + if len(bundles) == 0 { + return nil, errors.New("expected at least one bundle") + } + + if regoVersion == ast.RegoUndefined { + regoVersion = ast.DefaultRegoVersion + } + + if len(bundles) == 1 { + result := bundles[0] + // We respect the bundle rego-version, defaulting to the provided rego version if not set. + result.SetRegoVersion(result.RegoVersion(regoVersion)) + fileRegoVersions, err := bundleRegoVersions(result, result.RegoVersion(regoVersion), usePath) + if err != nil { + return nil, err + } + result.Manifest.FileRegoVersions = fileRegoVersions + return result, nil + } + + var roots []string + var result Bundle + + for _, b := range bundles { + + if b.Manifest.Roots == nil { + return nil, errors.New("bundle manifest not initialized") + } + + roots = append(roots, *b.Manifest.Roots...) + + result.Modules = append(result.Modules, b.Modules...) + + for _, root := range *b.Manifest.Roots { + key := strings.Split(root, "/") + if val := b.readData(key); val != nil { + if err := result.insertData(key, *val); err != nil { + return nil, err + } + } + } + + result.Manifest.WasmResolvers = append(result.Manifest.WasmResolvers, b.Manifest.WasmResolvers...) + result.WasmModules = append(result.WasmModules, b.WasmModules...) + result.PlanModules = append(result.PlanModules, b.PlanModules...) + + if b.Manifest.RegoVersion != nil || len(b.Manifest.FileRegoVersions) > 0 { + if result.Manifest.FileRegoVersions == nil { + result.Manifest.FileRegoVersions = map[string]int{} + } + + fileRegoVersions, err := bundleRegoVersions(b, regoVersion, usePath) + if err != nil { + return nil, err + } + for k, v := range fileRegoVersions { + result.Manifest.FileRegoVersions[k] = v + } + } + } + + // We respect the bundle rego-version, defaulting to the provided rego version if not set. + result.SetRegoVersion(result.RegoVersion(regoVersion)) + + if result.Data == nil { + result.Data = map[string]interface{}{} + } + + result.Manifest.Roots = &roots + + if err := result.Manifest.validateAndInjectDefaults(result); err != nil { + return nil, err + } + + return &result, nil +} + +func bundleRegoVersions(bundle *Bundle, regoVersion ast.RegoVersion, usePath bool) (map[string]int, error) { + fileRegoVersions := map[string]int{} + + // we drop the bundle-global rego versions and record individual rego versions for each module. + for _, m := range bundle.Modules { + // We fetch rego-version by the path relative to the bundle root, as the complete path of the module might + // contain the path between OPA working directory and the bundle root. + v, err := bundle.RegoVersionForFile(bundleRelativePath(m, usePath), bundle.RegoVersion(regoVersion)) + if err != nil { + return nil, err + } + + // only record the rego version if it's different from the one applied globally to the result bundle + if v != ast.RegoUndefined { + if regoVersion == ast.RegoUndefined { + // We store the rego version by the absolute path to the bundle root, as this will be the - possibly new - path + // to the module inside the merged bundle. 
+ fileRegoVersions[bundleAbsolutePath(m, usePath)] = v.Int() + } else { + vInt := v.Int() + gVInt := regoVersion.Int() + if vInt != gVInt { + fileRegoVersions[bundleAbsolutePath(m, usePath)] = vInt + } + } + } + } + + return fileRegoVersions, nil +} + +func bundleRelativePath(m ModuleFile, usePath bool) string { + p := m.RelativePath + if p == "" { + if usePath { + p = m.Path + } else { + p = m.URL + } + } + return p +} + +func bundleAbsolutePath(m ModuleFile, usePath bool) string { + var p string + if usePath { + p = m.Path + } else { + p = m.URL + } + if !path.IsAbs(p) { + p = "/" + p + } + return path.Clean(p) +} + +// RootPathsOverlap takes in two bundle root paths and returns true if they overlap. +func RootPathsOverlap(pathA string, pathB string) bool { + a := rootPathSegments(pathA) + b := rootPathSegments(pathB) + return rootContains(a, b) || rootContains(b, a) +} + +// RootPathsContain takes a set of bundle root paths and returns true if the path is contained. +func RootPathsContain(roots []string, path string) bool { + segments := rootPathSegments(path) + for i := range roots { + if rootContains(rootPathSegments(roots[i]), segments) { + return true + } + } + return false +} + +func rootPathSegments(path string) []string { + return strings.Split(path, "/") +} + +func rootContains(root []string, other []string) bool { + + // A single segment, empty string root always contains the other. + if len(root) == 1 && root[0] == "" { + return true + } + + if len(root) > len(other) { + return false + } + + for j := range root { + if root[j] != other[j] { + return false + } + } + + return true +} + +func insertValue(b *Bundle, path string, value interface{}) error { + if err := b.insertData(getNormalizedPath(path), value); err != nil { + return fmt.Errorf("bundle load failed on %v: %w", path, err) + } + return nil +} + +func getNormalizedPath(path string) []string { + // Remove leading / and . characters from the directory path. If the bundle + // was written with OPA then the paths will contain a leading slash. On the + // other hand, if the path is empty, filepath.Dir will return '.'. + // Note: filepath.Dir can return paths with '\' separators, always use + // filepath.ToSlash to keep them normalized. + dirpath := strings.TrimLeft(normalizePath(filepath.Dir(path)), "/.") + var key []string + if dirpath != "" { + key = strings.Split(dirpath, "/") + } + return key +} + +func dfs(value interface{}, path string, fn func(string, interface{}) (bool, error)) error { + if stop, err := fn(path, value); err != nil { + return err + } else if stop { + return nil + } + obj, ok := value.(map[string]interface{}) + if !ok { + return nil + } + for key := range obj { + if err := dfs(obj[key], path+"/"+key, fn); err != nil { + return err + } + } + return nil +} + +func modulePathWithPrefix(bundleName string, modulePath string) string { + // Default prefix is just the bundle name + prefix := bundleName + + // Bundle names are sometimes just file paths, some of which + // are full urls (file:///foo/). Parse these and only use the path. + parsed, err := url.Parse(bundleName) + if err == nil { + prefix = filepath.Join(parsed.Host, parsed.Path) + } + + // Note: filepath.Join can return paths with '\' separators, always use + // filepath.ToSlash to keep them normalized. + return normalizePath(filepath.Join(prefix, modulePath)) +} + +// IsStructuredDoc checks if the file name equals a structured file extension ex. 
".json" +func IsStructuredDoc(name string) bool { + return filepath.Base(name) == dataFile || filepath.Base(name) == yamlDataFile || + filepath.Base(name) == SignaturesFile || filepath.Base(name) == ManifestExt +} + +func preProcessBundle(loader DirectoryLoader, skipVerify bool, sizeLimitBytes int64) (SignaturesConfig, Patch, []*Descriptor, error) { + descriptors := []*Descriptor{} + var signatures SignaturesConfig + var patch Patch + + for { + f, err := loader.NextFile() + if err == io.EOF { + break + } + + if err != nil { + return signatures, patch, nil, fmt.Errorf("bundle read failed: %w", err) + } + + // check for the signatures file + if !skipVerify && strings.HasSuffix(f.Path(), SignaturesFile) { + buf, err := readFile(f, sizeLimitBytes) + if err != nil { + return signatures, patch, nil, err + } + + if err := util.NewJSONDecoder(&buf).Decode(&signatures); err != nil { + return signatures, patch, nil, fmt.Errorf("bundle load failed on signatures decode: %w", err) + } + } else if !strings.HasSuffix(f.Path(), SignaturesFile) { + descriptors = append(descriptors, f) + + if filepath.Base(f.Path()) == patchFile { + + var b bytes.Buffer + tee := io.TeeReader(f.reader, &b) + f.reader = tee + + buf, err := readFile(f, sizeLimitBytes) + if err != nil { + return signatures, patch, nil, err + } + + if err := util.NewJSONDecoder(&buf).Decode(&patch); err != nil { + return signatures, patch, nil, fmt.Errorf("bundle load failed on patch decode: %w", err) + } + + f.reader = &b + } + } + } + return signatures, patch, descriptors, nil +} + +func readFile(f *Descriptor, sizeLimitBytes int64) (bytes.Buffer, error) { + // Case for pre-loaded byte buffers, like those from the tarballLoader. + if bb, ok := f.reader.(*bytes.Buffer); ok { + _ = f.Close() // always close, even on error + + if int64(bb.Len()) >= sizeLimitBytes { + return *bb, fmt.Errorf("bundle file '%v' size (%d bytes) exceeded max size (%v bytes)", + strings.TrimPrefix(f.Path(), "/"), bb.Len(), sizeLimitBytes-1) + } + + return *bb, nil + } + + // Case for *lazyFile readers: + if lf, ok := f.reader.(*lazyFile); ok { + var buf bytes.Buffer + if lf.file == nil { + var err error + if lf.file, err = os.Open(lf.path); err != nil { + return buf, fmt.Errorf("failed to open file %s: %w", f.path, err) + } + } + // Bail out if we can't read the whole file-- there's nothing useful we can do at that point! + fileSize, _ := fstatFileSize(lf.file) + if fileSize > sizeLimitBytes { + return buf, fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(f.Path(), "/"), fileSize, sizeLimitBytes-1) + } + // Prealloc the buffer for the file read. + buffer := make([]byte, fileSize) + _, err := io.ReadFull(lf.file, buffer) + if err != nil { + return buf, err + } + _ = lf.file.Close() // always close, even on error + + // Note(philipc): Replace the lazyFile reader in the *Descriptor with a + // pointer to the wrapping bytes.Buffer, so that we don't re-read the + // file on disk again by accident. + buf = *bytes.NewBuffer(buffer) + f.reader = &buf + return buf, nil + } + + // Fallback case: + var buf bytes.Buffer + n, err := f.Read(&buf, sizeLimitBytes) + _ = f.Close() // always close, even on error + + if err != nil && err != io.EOF { + return buf, err + } else if err == nil && n >= sizeLimitBytes { + return buf, fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(f.Path(), "/"), n, sizeLimitBytes-1) + } + + return buf, nil +} + +// Takes an already open file handle and invokes the os.Stat system call on it +// to determine the file's size. 
Passes any errors from *File.Stat on up to the
+// caller.
+func fstatFileSize(f *os.File) (int64, error) {
+	fileInfo, err := f.Stat()
+	if err != nil {
+		return 0, err
+	}
+	return fileInfo.Size(), nil
+}
+
+func normalizePath(p string) string {
+	return filepath.ToSlash(p)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/file.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/file.go
new file mode 100644
index 00000000000..12e159254cf
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/file.go
@@ -0,0 +1,517 @@
+package bundle
+
+import (
+	"archive/tar"
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"sync"
+
+	"github.com/open-policy-agent/opa/v1/loader/filter"
+
+	"github.com/open-policy-agent/opa/v1/storage"
+)
+
+const maxSizeLimitBytesErrMsg = "bundle file %s size (%d bytes) exceeds configured size_limit_bytes (%d bytes)"
+
+// Descriptor contains information about a file and
+// can be used to read the file contents.
+type Descriptor struct {
+	url       string
+	path      string
+	reader    io.Reader
+	closer    io.Closer
+	closeOnce *sync.Once
+}
+
+// lazyFile defers reading the file until the first call of Read.
+type lazyFile struct {
+	path string
+	file *os.File
+}
+
+// newLazyFile creates a new instance of lazyFile.
+func newLazyFile(path string) *lazyFile {
+	return &lazyFile{path: path}
+}
+
+// Read implements io.Reader. It will check if the file has been opened,
+// and open it if it has not, before attempting to read using the file's
+// read method.
+func (f *lazyFile) Read(b []byte) (int, error) {
+	var err error
+
+	if f.file == nil {
+		if f.file, err = os.Open(f.path); err != nil {
+			return 0, fmt.Errorf("failed to open file %s: %w", f.path, err)
+		}
+	}
+
+	return f.file.Read(b)
+}
+
+// Close closes the lazy file, if it has been opened, using the file's
+// close method.
+func (f *lazyFile) Close() error {
+	if f.file != nil {
+		return f.file.Close()
+	}
+
+	return nil
+}
+
+func NewDescriptor(url, path string, reader io.Reader) *Descriptor {
+	return &Descriptor{
+		url:    url,
+		path:   path,
+		reader: reader,
+	}
+}
+
+func (d *Descriptor) WithCloser(closer io.Closer) *Descriptor {
+	d.closer = closer
+	d.closeOnce = new(sync.Once)
+	return d
+}
+
+// Path returns the path of the file.
+func (d *Descriptor) Path() string {
+	return d.path
+}
+
+// URL returns the url of the file.
+func (d *Descriptor) URL() string {
+	return d.url
+}
+
+// Read will read all the contents from the file the Descriptor refers to
+// into the dest writer, up to n bytes. Will return an io.EOF error
+// if EOF is encountered before n bytes are read.
+func (d *Descriptor) Read(dest io.Writer, n int64) (int64, error) {
+	n, err := io.CopyN(dest, d.reader, n)
+	return n, err
+}
+
+// Close the file. On some Loader implementations this might be a no-op.
+// It should *always* be called, regardless of the loader implementation.
+func (d *Descriptor) Close() error {
+	var err error
+	if d.closer != nil {
+		d.closeOnce.Do(func() {
+			err = d.closer.Close()
+		})
+	}
+	return err
+}
+
+type PathFormat int64
+
+const (
+	Chrooted PathFormat = iota
+	SlashRooted
+	Passthrough
+)
+
+// DirectoryLoader defines an interface which can be used to load
+// files from a directory by iterating over each one in the tree.
+type DirectoryLoader interface {
+	// NextFile must return io.EOF if there is no next value. The returned
+	// descriptor should *always* be closed when no longer needed.
+ NextFile() (*Descriptor, error) + WithFilter(filter filter.LoaderFilter) DirectoryLoader + WithPathFormat(PathFormat) DirectoryLoader + WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader + WithFollowSymlinks(followSymlinks bool) DirectoryLoader +} + +type dirLoader struct { + root string + files []string + idx int + filter filter.LoaderFilter + pathFormat PathFormat + maxSizeLimitBytes int64 + followSymlinks bool +} + +// Normalize root directory, ex "./src/bundle" -> "src/bundle" +// We don't need an absolute path, but this makes the joined/trimmed +// paths more uniform. +func normalizeRootDirectory(root string) string { + if len(root) > 1 { + if root[0] == '.' && root[1] == filepath.Separator { + if len(root) == 2 { + root = root[:1] // "./" -> "." + } else { + root = root[2:] // remove leading "./" + } + } + } + return root +} + +// NewDirectoryLoader returns a basic DirectoryLoader implementation +// that will load files from a given root directory path. +func NewDirectoryLoader(root string) DirectoryLoader { + d := dirLoader{ + root: normalizeRootDirectory(root), + pathFormat: Chrooted, + } + return &d +} + +// WithFilter specifies the filter object to use to filter files while loading bundles +func (d *dirLoader) WithFilter(filter filter.LoaderFilter) DirectoryLoader { + d.filter = filter + return d +} + +// WithPathFormat specifies how a path is formatted in a Descriptor +func (d *dirLoader) WithPathFormat(pathFormat PathFormat) DirectoryLoader { + d.pathFormat = pathFormat + return d +} + +// WithSizeLimitBytes specifies the maximum size of any file in the directory to read +func (d *dirLoader) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader { + d.maxSizeLimitBytes = sizeLimitBytes + return d +} + +// WithFollowSymlinks specifies whether to follow symlinks when loading files from the directory +func (d *dirLoader) WithFollowSymlinks(followSymlinks bool) DirectoryLoader { + d.followSymlinks = followSymlinks + return d +} + +func formatPath(fileName string, root string, pathFormat PathFormat) string { + switch pathFormat { + case SlashRooted: + if !strings.HasPrefix(fileName, string(filepath.Separator)) { + return string(filepath.Separator) + fileName + } + return fileName + case Chrooted: + // Trim off the root directory and return path as if chrooted + result := strings.TrimPrefix(fileName, filepath.FromSlash(root)) + if root == "." && filepath.Base(fileName) == ManifestExt { + result = fileName + } + if !strings.HasPrefix(result, string(filepath.Separator)) { + result = string(filepath.Separator) + result + } + return result + case Passthrough: + fallthrough + default: + return fileName + } +} + +// NextFile iterates to the next file in the directory tree +// and returns a file Descriptor for the file. 
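+//
+// Callers typically drain a DirectoryLoader until io.EOF (sketch):
+//
+//	for {
+//		d, err := loader.NextFile()
+//		if err == io.EOF {
+//			break
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		// ... read from d, then d.Close()
+//	}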
+func (d *dirLoader) NextFile() (*Descriptor, error) { + // build a list of all files we will iterate over and read, but only one time + if d.files == nil { + d.files = []string{} + err := filepath.Walk(d.root, func(path string, info os.FileInfo, _ error) error { + if info == nil { + return nil + } + + if info.Mode().IsRegular() { + if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { + return nil + } + if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes { + return fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(path, "/"), info.Size(), d.maxSizeLimitBytes) + } + d.files = append(d.files, path) + } else if d.followSymlinks && info.Mode().Type()&fs.ModeSymlink == fs.ModeSymlink { + if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { + return nil + } + if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes { + return fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(path, "/"), info.Size(), d.maxSizeLimitBytes) + } + d.files = append(d.files, path) + } else if info.Mode().IsDir() { + if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, true)) { + return filepath.SkipDir + } + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to list files: %w", err) + } + } + + // If done reading files then just return io.EOF + // errors for each NextFile() call + if d.idx >= len(d.files) { + return nil, io.EOF + } + + fileName := d.files[d.idx] + d.idx++ + fh := newLazyFile(fileName) + + cleanedPath := formatPath(fileName, d.root, d.pathFormat) + f := NewDescriptor(filepath.Join(d.root, cleanedPath), cleanedPath, fh).WithCloser(fh) + return f, nil +} + +type tarballLoader struct { + baseURL string + r io.Reader + tr *tar.Reader + files []file + idx int + filter filter.LoaderFilter + skipDir map[string]struct{} + pathFormat PathFormat + maxSizeLimitBytes int64 +} + +type file struct { + name string + reader io.Reader + path storage.Path + raw []byte +} + +// NewTarballLoader is deprecated. Use NewTarballLoaderWithBaseURL instead. +func NewTarballLoader(r io.Reader) DirectoryLoader { + l := tarballLoader{ + r: r, + pathFormat: Passthrough, + } + return &l +} + +// NewTarballLoaderWithBaseURL returns a new DirectoryLoader that reads +// files out of a gzipped tar archive. The file URLs will be prefixed +// with the baseURL. +func NewTarballLoaderWithBaseURL(r io.Reader, baseURL string) DirectoryLoader { + l := tarballLoader{ + baseURL: strings.TrimSuffix(baseURL, "/"), + r: r, + pathFormat: Passthrough, + } + return &l +} + +// WithFilter specifies the filter object to use to filter files while loading bundles +func (t *tarballLoader) WithFilter(filter filter.LoaderFilter) DirectoryLoader { + t.filter = filter + return t +} + +// WithPathFormat specifies how a path is formatted in a Descriptor +func (t *tarballLoader) WithPathFormat(pathFormat PathFormat) DirectoryLoader { + t.pathFormat = pathFormat + return t +} + +// WithSizeLimitBytes specifies the maximum size of any file in the tarball to read +func (t *tarballLoader) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader { + t.maxSizeLimitBytes = sizeLimitBytes + return t +} + +// WithFollowSymlinks is a no-op for tarballLoader +func (t *tarballLoader) WithFollowSymlinks(_ bool) DirectoryLoader { + return t +} + +// NextFile iterates to the next file in the directory tree +// and returns a file Descriptor for the file. 
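+// The first call reads the entire gzipped tarball: every regular file that
+// survives the filter and size checks is buffered into memory before the
+// first Descriptor is returned, so peak memory usage is proportional to the
+// decompressed archive size.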
+func (t *tarballLoader) NextFile() (*Descriptor, error) { + if t.tr == nil { + gr, err := gzip.NewReader(t.r) + if err != nil { + return nil, fmt.Errorf("archive read failed: %w", err) + } + + t.tr = tar.NewReader(gr) + } + + if t.files == nil { + t.files = []file{} + + if t.skipDir == nil { + t.skipDir = map[string]struct{}{} + } + + for { + header, err := t.tr.Next() + + if err == io.EOF { + break + } + + if err != nil { + return nil, err + } + + // Keep iterating on the archive until we find a normal file + if header.Typeflag == tar.TypeReg { + + if t.filter != nil { + + if t.filter(filepath.ToSlash(header.Name), header.FileInfo(), getdepth(header.Name, false)) { + continue + } + + basePath := strings.Trim(filepath.Dir(filepath.ToSlash(header.Name)), "/") + + // check if the directory is to be skipped + if _, ok := t.skipDir[basePath]; ok { + continue + } + + match := false + for p := range t.skipDir { + if strings.HasPrefix(basePath, p) { + match = true + break + } + } + + if match { + continue + } + } + + if t.maxSizeLimitBytes > 0 && header.Size > t.maxSizeLimitBytes { + return nil, fmt.Errorf(maxSizeLimitBytesErrMsg, header.Name, header.Size, t.maxSizeLimitBytes) + } + + f := file{name: header.Name} + + // Note(philipc): We rely on the previous size check in this loop for safety. + buf := bytes.NewBuffer(make([]byte, 0, header.Size)) + if _, err := io.Copy(buf, t.tr); err != nil { + return nil, fmt.Errorf("failed to copy file %s: %w", header.Name, err) + } + + f.reader = buf + + t.files = append(t.files, f) + } else if header.Typeflag == tar.TypeDir { + cleanedPath := filepath.ToSlash(header.Name) + if t.filter != nil && t.filter(cleanedPath, header.FileInfo(), getdepth(header.Name, true)) { + t.skipDir[strings.Trim(cleanedPath, "/")] = struct{}{} + } + } + } + } + + // If done reading files then just return io.EOF + // errors for each NextFile() call + if t.idx >= len(t.files) { + return nil, io.EOF + } + + f := t.files[t.idx] + t.idx++ + + cleanedPath := formatPath(f.name, "", t.pathFormat) + d := NewDescriptor(filepath.Join(t.baseURL, cleanedPath), cleanedPath, f.reader) + return d, nil +} + +// Next implements the storage.Iterator interface. +// It iterates to the next policy or data file in the directory tree +// and returns a storage.Update for the file. 
+func (it *iterator) Next() (*storage.Update, error) { + if it.files == nil { + it.files = []file{} + + for _, item := range it.raw { + f := file{name: item.Path} + + p, err := getFileStoragePath(f.name) + if err != nil { + return nil, err + } + + f.path = p + + f.raw = item.Value + + it.files = append(it.files, f) + } + + sortFilePathAscend(it.files) + } + + // If done reading files then just return io.EOF + // errors for each NextFile() call + if it.idx >= len(it.files) { + return nil, io.EOF + } + + f := it.files[it.idx] + it.idx++ + + isPolicy := false + if strings.HasSuffix(f.name, RegoExt) { + isPolicy = true + } + + return &storage.Update{ + Path: f.path, + Value: f.raw, + IsPolicy: isPolicy, + }, nil +} + +type iterator struct { + raw []Raw + files []file + idx int +} + +func NewIterator(raw []Raw) storage.Iterator { + it := iterator{ + raw: raw, + } + return &it +} + +func sortFilePathAscend(files []file) { + sort.Slice(files, func(i, j int) bool { + return len(files[i].path) < len(files[j].path) + }) +} + +func getdepth(path string, isDir bool) int { + if isDir { + cleanedPath := strings.Trim(filepath.ToSlash(path), "/") + return len(strings.Split(cleanedPath, "/")) + } + + basePath := strings.Trim(filepath.Dir(filepath.ToSlash(path)), "/") + return len(strings.Split(basePath, "/")) +} + +func getFileStoragePath(path string) (storage.Path, error) { + fpath := strings.TrimLeft(normalizePath(filepath.Dir(path)), "/.") + if strings.HasSuffix(path, RegoExt) { + fpath = strings.Trim(normalizePath(path), "/") + } + + p, ok := storage.ParsePathEscaped("/" + fpath) + if !ok { + return nil, fmt.Errorf("storage path invalid: %v", path) + } + return p, nil +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/filefs.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/filefs.go new file mode 100644 index 00000000000..7ab3de989c1 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/filefs.go @@ -0,0 +1,143 @@ +//go:build go1.16 +// +build go1.16 + +package bundle + +import ( + "fmt" + "io" + "io/fs" + "path/filepath" + "sync" + + "github.com/open-policy-agent/opa/v1/loader/filter" +) + +const ( + defaultFSLoaderRoot = "." 
+) + +type dirLoaderFS struct { + sync.Mutex + filesystem fs.FS + files []string + idx int + filter filter.LoaderFilter + root string + pathFormat PathFormat + maxSizeLimitBytes int64 + followSymlinks bool +} + +// NewFSLoader returns a basic DirectoryLoader implementation +// that will load files from a fs.FS interface +func NewFSLoader(filesystem fs.FS) (DirectoryLoader, error) { + return NewFSLoaderWithRoot(filesystem, defaultFSLoaderRoot), nil +} + +// NewFSLoaderWithRoot returns a basic DirectoryLoader implementation +// that will load files from a fs.FS interface at the supplied root +func NewFSLoaderWithRoot(filesystem fs.FS, root string) DirectoryLoader { + d := dirLoaderFS{ + filesystem: filesystem, + root: normalizeRootDirectory(root), + pathFormat: Chrooted, + } + + return &d +} + +func (d *dirLoaderFS) walkDir(path string, dirEntry fs.DirEntry, err error) error { + if err != nil { + return err + } + + if dirEntry != nil { + info, err := dirEntry.Info() + if err != nil { + return err + } + + if dirEntry.Type().IsRegular() { + if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { + return nil + } + + if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes { + return fmt.Errorf("file %s size %d exceeds limit of %d", path, info.Size(), d.maxSizeLimitBytes) + } + + d.files = append(d.files, path) + } else if dirEntry.Type()&fs.ModeSymlink != 0 && d.followSymlinks { + if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { + return nil + } + + if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes { + return fmt.Errorf("file %s size %d exceeds limit of %d", path, info.Size(), d.maxSizeLimitBytes) + } + + d.files = append(d.files, path) + } else if dirEntry.Type().IsDir() { + if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, true)) { + return fs.SkipDir + } + } + } + return nil +} + +// WithFilter specifies the filter object to use to filter files while loading bundles +func (d *dirLoaderFS) WithFilter(filter filter.LoaderFilter) DirectoryLoader { + d.filter = filter + return d +} + +// WithPathFormat specifies how a path is formatted in a Descriptor +func (d *dirLoaderFS) WithPathFormat(pathFormat PathFormat) DirectoryLoader { + d.pathFormat = pathFormat + return d +} + +// WithSizeLimitBytes specifies the maximum size of any file in the filesystem directory to read +func (d *dirLoaderFS) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader { + d.maxSizeLimitBytes = sizeLimitBytes + return d +} + +func (d *dirLoaderFS) WithFollowSymlinks(followSymlinks bool) DirectoryLoader { + d.followSymlinks = followSymlinks + return d +} + +// NextFile iterates to the next file in the directory tree +// and returns a file Descriptor for the file. 
+func (d *dirLoaderFS) NextFile() (*Descriptor, error) { + d.Lock() + defer d.Unlock() + + if d.files == nil { + err := fs.WalkDir(d.filesystem, d.root, d.walkDir) + if err != nil { + return nil, fmt.Errorf("failed to list files: %w", err) + } + } + + // If done reading files then just return io.EOF + // errors for each NextFile() call + if d.idx >= len(d.files) { + return nil, io.EOF + } + + fileName := d.files[d.idx] + d.idx++ + + fh, err := d.filesystem.Open(fileName) + if err != nil { + return nil, fmt.Errorf("failed to open file %s: %w", fileName, err) + } + + cleanedPath := formatPath(fileName, d.root, d.pathFormat) + f := NewDescriptor(cleanedPath, cleanedPath, fh).WithCloser(fh) + return f, nil +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/hash.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/hash.go new file mode 100644 index 00000000000..ab6fcd0f381 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/hash.go @@ -0,0 +1,136 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package bundle + +import ( + "bytes" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/json" + "fmt" + "hash" + "io" + "strings" + + "github.com/open-policy-agent/opa/v1/util" +) + +// HashingAlgorithm represents a subset of hashing algorithms implemented in Go +type HashingAlgorithm string + +// Supported values for HashingAlgorithm +const ( + MD5 HashingAlgorithm = "MD5" + SHA1 HashingAlgorithm = "SHA-1" + SHA224 HashingAlgorithm = "SHA-224" + SHA256 HashingAlgorithm = "SHA-256" + SHA384 HashingAlgorithm = "SHA-384" + SHA512 HashingAlgorithm = "SHA-512" + SHA512224 HashingAlgorithm = "SHA-512-224" + SHA512256 HashingAlgorithm = "SHA-512-256" +) + +// String returns the string representation of a HashingAlgorithm +func (alg HashingAlgorithm) String() string { + return string(alg) +} + +// SignatureHasher computes a signature digest for a file with (structured or unstructured) data and policy +type SignatureHasher interface { + HashFile(v interface{}) ([]byte, error) +} + +type hasher struct { + h func() hash.Hash // hash function factory +} + +// NewSignatureHasher returns a signature hasher suitable for a particular hashing algorithm +func NewSignatureHasher(alg HashingAlgorithm) (SignatureHasher, error) { + h := &hasher{} + + switch alg { + case MD5: + h.h = md5.New + case SHA1: + h.h = sha1.New + case SHA224: + h.h = sha256.New224 + case SHA256: + h.h = sha256.New + case SHA384: + h.h = sha512.New384 + case SHA512: + h.h = sha512.New + case SHA512224: + h.h = sha512.New512_224 + case SHA512256: + h.h = sha512.New512_256 + default: + return nil, fmt.Errorf("unsupported hashing algorithm: %s", alg) + } + + return h, nil +} + +// HashFile hashes the file content, JSON or binary, both in golang native format. +func (h *hasher) HashFile(v interface{}) ([]byte, error) { + hf := h.h() + walk(v, hf) + return hf.Sum(nil), nil +} + +// walk hashes the file content, JSON or binary, both in golang native format. +// +// Computation for unstructured documents is a hash of the document. +// +// Computation for the types of structured JSON document is as follows: +// +// object: Hash {, then each key (in alphabetical order) and digest of the value, then comma (between items) and finally }. +// +// array: Hash [, then digest of the value, then comma (between items) and finally ]. 
+func walk(v interface{}, h io.Writer) {
+
+	switch x := v.(type) {
+	case map[string]interface{}:
+		_, _ = h.Write([]byte("{"))
+
+		for i, key := range util.KeysSorted(x) {
+			if i > 0 {
+				_, _ = h.Write([]byte(","))
+			}
+
+			_, _ = h.Write(encodePrimitive(key))
+			_, _ = h.Write([]byte(":"))
+			walk(x[key], h)
+		}
+
+		_, _ = h.Write([]byte("}"))
+	case []interface{}:
+		_, _ = h.Write([]byte("["))
+
+		for i, e := range x {
+			if i > 0 {
+				_, _ = h.Write([]byte(","))
+			}
+			walk(e, h)
+		}
+
+		_, _ = h.Write([]byte("]"))
+	case []byte:
+		_, _ = h.Write(x)
+	default:
+		_, _ = h.Write(encodePrimitive(x))
+	}
+}
+
+func encodePrimitive(v interface{}) []byte {
+	var buf bytes.Buffer
+	encoder := json.NewEncoder(&buf)
+	encoder.SetEscapeHTML(false)
+	_ = encoder.Encode(v)
+	return []byte(strings.Trim(buf.String(), "\n"))
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/keys.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/keys.go
new file mode 100644
index 00000000000..aad30a675a6
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/keys.go
@@ -0,0 +1,144 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package bundle provides helpers that assist in creating the verification and signing key configuration
+package bundle
+
+import (
+	"encoding/pem"
+	"fmt"
+	"os"
+
+	"github.com/open-policy-agent/opa/internal/jwx/jwa"
+	"github.com/open-policy-agent/opa/internal/jwx/jws/sign"
+	"github.com/open-policy-agent/opa/v1/keys"
+
+	"github.com/open-policy-agent/opa/v1/util"
+)
+
+const (
+	defaultTokenSigningAlg = "RS256"
+)
+
+// KeyConfig holds the keys used to sign or verify bundles and tokens.
+// Moved to its own package; the alias is kept for backwards compatibility.
+type KeyConfig = keys.Config
+
+// VerificationConfig represents the key configuration used to verify a signed bundle
+type VerificationConfig struct {
+	PublicKeys map[string]*KeyConfig
+	KeyID      string   `json:"keyid"`
+	Scope      string   `json:"scope"`
+	Exclude    []string `json:"exclude_files"`
+}
+
+// NewVerificationConfig returns a new VerificationConfig
+func NewVerificationConfig(keys map[string]*KeyConfig, id, scope string, exclude []string) *VerificationConfig {
+	return &VerificationConfig{
+		PublicKeys: keys,
+		KeyID:      id,
+		Scope:      scope,
+		Exclude:    exclude,
+	}
+}
+
+// ValidateAndInjectDefaults validates the config and inserts default values
+func (vc *VerificationConfig) ValidateAndInjectDefaults(keys map[string]*KeyConfig) error {
+	vc.PublicKeys = keys
+
+	if vc.KeyID != "" {
+		found := false
+		for key := range keys {
+			if key == vc.KeyID {
+				found = true
+				break
+			}
+		}
+
+		if !found {
+			return fmt.Errorf("key id %s not found", vc.KeyID)
+		}
+	}
+	return nil
+}
+
+// GetPublicKey returns the public key corresponding to the given key id
+func (vc *VerificationConfig) GetPublicKey(id string) (*KeyConfig, error) {
+	var kc *KeyConfig
+	var ok bool
+
+	if kc, ok = vc.PublicKeys[id]; !ok {
+		return nil, fmt.Errorf("verification key corresponding to ID %v not found", id)
+	}
+	return kc, nil
+}
+
+// SigningConfig represents the key configuration used to generate a signed bundle
+type SigningConfig struct {
+	Plugin     string
+	Key        string
+	Algorithm  string
+	ClaimsPath string
+}
+
+// NewSigningConfig returns a new SigningConfig
+func NewSigningConfig(key, alg, claimsPath string) *SigningConfig {
+	if alg == "" {
+		alg = defaultTokenSigningAlg
+	}
+
+	return &SigningConfig{
+		Plugin:     defaultSignerID,
+		Key:        key,
+		Algorithm:  alg,
+		ClaimsPath: claimsPath,
+	}
+}
+
+// WithPlugin sets the signing plugin in the signing config
+func (s *SigningConfig) WithPlugin(plugin string) *SigningConfig {
+	if plugin != "" {
+		s.Plugin = plugin
+	}
+	return s
+}
+
+// GetPrivateKey returns the private key or secret from the signing config
+func (s *SigningConfig) GetPrivateKey() (interface{}, error) {
+
+	block, _ := pem.Decode([]byte(s.Key))
+	if block != nil {
+		return sign.GetSigningKey(s.Key, jwa.SignatureAlgorithm(s.Algorithm))
+	}
+
+	var priv string
+	if _, err := os.Stat(s.Key); err == nil {
+		bs, err := os.ReadFile(s.Key)
+		if err != nil {
+			return nil, err
+		}
+		priv = string(bs)
+	} else if os.IsNotExist(err) {
+		priv = s.Key
+	} else {
+		return nil, err
+	}
+
+	return sign.GetSigningKey(priv, jwa.SignatureAlgorithm(s.Algorithm))
+}
+
+// GetClaims returns the claims by reading the file specified in the signing config
+func (s *SigningConfig) GetClaims() (map[string]interface{}, error) {
+	var claims map[string]interface{}
+
+	bs, err := os.ReadFile(s.ClaimsPath)
+	if err != nil {
+		return claims, err
+	}
+
+	if err := util.UnmarshalJSON(bs, &claims); err != nil {
+		return claims, err
+	}
+	return claims, nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/sign.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/sign.go
new file mode 100644
index 00000000000..710e2968603
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/sign.go
@@ -0,0 +1,133 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package bundle provides helpers that assist in creating a signed bundle
+package bundle
+
+import (
+	"crypto/rand"
+	"encoding/json"
+	"fmt"
+
+	"github.com/open-policy-agent/opa/internal/jwx/jwa"
+	"github.com/open-policy-agent/opa/internal/jwx/jws"
+)
+
+const defaultSignerID = "_default"
+
+var signers map[string]Signer
+
+// Signer is the interface expected for implementations that generate bundle signatures.
+type Signer interface {
+	GenerateSignedToken([]FileInfo, *SigningConfig, string) (string, error)
+}
+
+// GenerateSignedToken will retrieve the Signer implementation based on the Plugin specified
+// in SigningConfig, and call its implementation of GenerateSignedToken. The signer generates
+// a signed token given the list of files to be included in the payload and the bundle
+// signing config. The keyID, if non-empty, represents the value for the "keyid" claim in the token.
+func GenerateSignedToken(files []FileInfo, sc *SigningConfig, keyID string) (string, error) {
+	var plugin string
+	// for backwards compatibility, check if there is no plugin specified, and use default
+	if sc.Plugin == "" {
+		plugin = defaultSignerID
+	} else {
+		plugin = sc.Plugin
+	}
+	signer, err := GetSigner(plugin)
+	if err != nil {
+		return "", err
+	}
+	return signer.GenerateSignedToken(files, sc, keyID)
+}
+
+// DefaultSigner is the default bundle signing implementation. It signs bundles by generating
+// a JWT and signing it using a locally-accessible private key.
+type DefaultSigner struct{}
+
+// GenerateSignedToken generates a signed token given the list of files to be
+// included in the payload and the bundle signing config. The keyID, if
+// non-empty, represents the value for the "keyid" claim in the token.
+func (*DefaultSigner) GenerateSignedToken(files []FileInfo, sc *SigningConfig, keyID string) (string, error) {
+	payload, err := generatePayload(files, sc, keyID)
+	if err != nil {
+		return "", err
+	}
+
+	privateKey, err := sc.GetPrivateKey()
+	if err != nil {
+		return "", err
+	}
+
+	var headers jws.StandardHeaders
+
+	if err := headers.Set(jws.AlgorithmKey, jwa.SignatureAlgorithm(sc.Algorithm)); err != nil {
+		return "", err
+	}
+
+	if keyID != "" {
+		if err := headers.Set(jws.KeyIDKey, keyID); err != nil {
+			return "", err
+		}
+	}
+
+	hdr, err := json.Marshal(headers)
+	if err != nil {
+		return "", err
+	}
+
+	token, err := jws.SignLiteral(payload,
+		jwa.SignatureAlgorithm(sc.Algorithm),
+		privateKey,
+		hdr,
+		rand.Reader)
+	if err != nil {
+		return "", err
+	}
+	return string(token), nil
+}
+
+func generatePayload(files []FileInfo, sc *SigningConfig, keyID string) ([]byte, error) {
+	payload := make(map[string]interface{})
+	payload["files"] = files
+
+	if sc.ClaimsPath != "" {
+		claims, err := sc.GetClaims()
+		if err != nil {
+			return nil, err
+		}
+
+		for claim, value := range claims {
+			payload[claim] = value
+		}
+	} else if keyID != "" {
+		// keyid claim is deprecated but include it for backwards compatibility.
+		payload["keyid"] = keyID
+	}
+	return json.Marshal(payload)
+}
+
+// GetSigner returns the Signer registered under the given id
+func GetSigner(id string) (Signer, error) {
+	signer, ok := signers[id]
+	if !ok {
+		return nil, fmt.Errorf("no signer exists under id %s", id)
+	}
+	return signer, nil
+}
+
+// RegisterSigner registers a Signer under the given id
+func RegisterSigner(id string, s Signer) error {
+	if id == defaultSignerID {
+		return fmt.Errorf("signer id %s is reserved, use a different id", id)
+	}
+	signers[id] = s
+	return nil
+}
+
+func init() {
+	signers = map[string]Signer{
+		defaultSignerID: &DefaultSigner{},
+	}
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/store.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/store.go
new file mode 100644
index 00000000000..363f7664d71
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/store.go
@@ -0,0 +1,1166 @@
+// Copyright 2019 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package bundle
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"path/filepath"
+	"strings"
+
+	iCompiler "github.com/open-policy-agent/opa/internal/compiler"
+	"github.com/open-policy-agent/opa/internal/json/patch"
+	"github.com/open-policy-agent/opa/v1/ast"
+	"github.com/open-policy-agent/opa/v1/metrics"
+	"github.com/open-policy-agent/opa/v1/storage"
+	"github.com/open-policy-agent/opa/v1/util"
+)
+
+// BundlesBasePath is the storage path used for storing bundle metadata
+var BundlesBasePath = storage.MustParsePath("/system/bundles")
+
+// ModulesInfoBasePath is the storage path used for storing module metadata
+// (for example, the rego version a stored module was parsed with).
+var ModulesInfoBasePath = storage.MustParsePath("/system/modules")
+
+// Note: As needed these helpers could be memoized.
+
+// ManifestStoragePath is the storage path used for the given named bundle manifest.
+func ManifestStoragePath(name string) storage.Path {
+	return append(BundlesBasePath, name, "manifest")
+}
+
+// EtagStoragePath is the storage path used for the given named bundle etag.
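+// For example, a bundle named "b1" keeps its etag at /system/bundles/b1/etag,
+// alongside its manifest at /system/bundles/b1/manifest.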
+func EtagStoragePath(name string) storage.Path { + return append(BundlesBasePath, name, "etag") +} + +func namedBundlePath(name string) storage.Path { + return append(BundlesBasePath, name) +} + +func rootsPath(name string) storage.Path { + return append(BundlesBasePath, name, "manifest", "roots") +} + +func revisionPath(name string) storage.Path { + return append(BundlesBasePath, name, "manifest", "revision") +} + +func wasmModulePath(name string) storage.Path { + return append(BundlesBasePath, name, "wasm") +} + +func wasmEntrypointsPath(name string) storage.Path { + return append(BundlesBasePath, name, "manifest", "wasm") +} + +func metadataPath(name string) storage.Path { + return append(BundlesBasePath, name, "manifest", "metadata") +} + +func moduleRegoVersionPath(id string) storage.Path { + return append(ModulesInfoBasePath, strings.Trim(id, "/"), "rego_version") +} + +func moduleInfoPath(id string) storage.Path { + return append(ModulesInfoBasePath, strings.Trim(id, "/")) +} + +func read(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (interface{}, error) { + value, err := store.Read(ctx, txn, path) + if err != nil { + return nil, err + } + + if astValue, ok := value.(ast.Value); ok { + value, err = ast.JSON(astValue) + if err != nil { + return nil, err + } + } + + return value, nil +} + +// ReadBundleNamesFromStore will return a list of bundle names which have had their metadata stored. +func ReadBundleNamesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) ([]string, error) { + value, err := read(ctx, store, txn, BundlesBasePath) + if err != nil { + return nil, err + } + + bundleMap, ok := value.(map[string]interface{}) + if !ok { + return nil, errors.New("corrupt manifest roots") + } + + bundles := make([]string, len(bundleMap)) + idx := 0 + for name := range bundleMap { + bundles[idx] = name + idx++ + } + return bundles, nil +} + +// WriteManifestToStore will write the manifest into the storage. This function is called when +// the bundle is activated. +func WriteManifestToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string, manifest Manifest) error { + return write(ctx, store, txn, ManifestStoragePath(name), manifest) +} + +// WriteEtagToStore will write the bundle etag into the storage. This function is called when the bundle is activated. +func WriteEtagToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name, etag string) error { + return write(ctx, store, txn, EtagStoragePath(name), etag) +} + +func write(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path, value interface{}) error { + if err := util.RoundTrip(&value); err != nil { + return err + } + + var dir []string + if len(path) > 1 { + dir = path[:len(path)-1] + } + + if err := storage.MakeDir(ctx, store, txn, dir); err != nil { + return err + } + + return store.Write(ctx, txn, storage.AddOp, path, value) +} + +// EraseManifestFromStore will remove the manifest from storage. This function is called +// when the bundle is deactivated. +func EraseManifestFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error { + path := namedBundlePath(name) + err := store.Write(ctx, txn, storage.RemoveOp, path, nil) + return suppressNotFound(err) +} + +// eraseBundleEtagFromStore will remove the bundle etag from storage. This function is called +// when the bundle is deactivated. 
+func eraseBundleEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error {
+	path := EtagStoragePath(name)
+	err := store.Write(ctx, txn, storage.RemoveOp, path, nil)
+	return suppressNotFound(err)
+}
+
+func suppressNotFound(err error) error {
+	if err == nil || storage.IsNotFound(err) {
+		return nil
+	}
+	return err
+}
+
+func writeWasmModulesToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string, b *Bundle) error {
+	basePath := wasmModulePath(name)
+	for _, wm := range b.WasmModules {
+		path := append(basePath, wm.Path)
+		err := write(ctx, store, txn, path, base64.StdEncoding.EncodeToString(wm.Raw))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func eraseWasmModulesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error {
+	path := wasmModulePath(name)
+
+	err := store.Write(ctx, txn, storage.RemoveOp, path, nil)
+	return suppressNotFound(err)
+}
+
+func eraseModuleRegoVersionsFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, modules []string) error {
+	for _, module := range modules {
+		err := store.Write(ctx, txn, storage.RemoveOp, moduleInfoPath(module), nil)
+		if err := suppressNotFound(err); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// ReadWasmMetadataFromStore will read Wasm module resolver metadata from the store.
+func ReadWasmMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) ([]WasmResolver, error) {
+	path := wasmEntrypointsPath(name)
+	value, err := read(ctx, store, txn, path)
+	if err != nil {
+		return nil, err
+	}
+
+	bs, err := json.Marshal(value)
+	if err != nil {
+		return nil, errors.New("corrupt wasm manifest data")
+	}
+
+	var wasmMetadata []WasmResolver
+
+	err = util.UnmarshalJSON(bs, &wasmMetadata)
+	if err != nil {
+		return nil, errors.New("corrupt wasm manifest data")
+	}
+
+	return wasmMetadata, nil
+}
+
+// ReadWasmModulesFromStore will read the raw Wasm modules for the named bundle from the store.
+func ReadWasmModulesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (map[string][]byte, error) {
+	path := wasmModulePath(name)
+	value, err := read(ctx, store, txn, path)
+	if err != nil {
+		return nil, err
+	}
+
+	encodedModules, ok := value.(map[string]interface{})
+	if !ok {
+		return nil, errors.New("corrupt wasm modules")
+	}
+
+	rawModules := map[string][]byte{}
+	for path, enc := range encodedModules {
+		encStr, ok := enc.(string)
+		if !ok {
+			return nil, errors.New("corrupt wasm modules")
+		}
+		bs, err := base64.StdEncoding.DecodeString(encStr)
+		if err != nil {
+			return nil, err
+		}
+		rawModules[path] = bs
+	}
+	return rawModules, nil
+}
+
+// ReadBundleRootsFromStore returns the roots in the specified bundle.
+// If the bundle is not activated, this function will return a
+// storage NotFound error.
+func ReadBundleRootsFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) ([]string, error) {
+	value, err := read(ctx, store, txn, rootsPath(name))
+	if err != nil {
+		return nil, err
+	}
+
+	sl, ok := value.([]interface{})
+	if !ok {
+		return nil, errors.New("corrupt manifest roots")
+	}
+
+	roots := make([]string, len(sl))
+
+	for i := range sl {
+		roots[i], ok = sl[i].(string)
+		if !ok {
+			return nil, errors.New("corrupt manifest root")
+		}
+	}
+
+	return roots, nil
+}
+
+// ReadBundleRevisionFromStore returns the revision in the specified bundle.
+// If the bundle is not activated, this function will return a
+// storage NotFound error.
+func ReadBundleRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (string, error) {
+	return readRevisionFromStore(ctx, store, txn, revisionPath(name))
+}
+
+func readRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (string, error) {
+	value, err := read(ctx, store, txn, path)
+	if err != nil {
+		return "", err
+	}
+
+	str, ok := value.(string)
+	if !ok {
+		return "", errors.New("corrupt manifest revision")
+	}
+
+	return str, nil
+}
+
+// ReadBundleMetadataFromStore returns the metadata in the specified bundle.
+// If the bundle is not activated, this function will return a
+// storage NotFound error.
+func ReadBundleMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (map[string]interface{}, error) {
+	return readMetadataFromStore(ctx, store, txn, metadataPath(name))
+}
+
+func readMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (map[string]interface{}, error) {
+	value, err := read(ctx, store, txn, path)
+	if err != nil {
+		return nil, suppressNotFound(err)
+	}
+
+	data, ok := value.(map[string]interface{})
+	if !ok {
+		return nil, errors.New("corrupt manifest metadata")
+	}
+
+	return data, nil
+}
+
+// ReadBundleEtagFromStore returns the etag for the specified bundle.
+// If the bundle is not activated, this function will return a
+// storage NotFound error.
+func ReadBundleEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (string, error) {
+	return readEtagFromStore(ctx, store, txn, EtagStoragePath(name))
+}
+
+func readEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (string, error) {
+	value, err := read(ctx, store, txn, path)
+	if err != nil {
+		return "", err
+	}
+
+	str, ok := value.(string)
+	if !ok {
+		return "", errors.New("corrupt bundle etag")
+	}
+
+	return str, nil
+}
+
+// ActivateOpts defines options for the Activate API call.
+type ActivateOpts struct {
+	Ctx                      context.Context
+	Store                    storage.Store
+	Txn                      storage.Transaction
+	TxnCtx                   *storage.Context
+	Compiler                 *ast.Compiler
+	Metrics                  metrics.Metrics
+	Bundles                  map[string]*Bundle     // Optional
+	ExtraModules             map[string]*ast.Module // Optional
+	AuthorizationDecisionRef ast.Ref
+	ParserOptions            ast.ParserOptions
+
+	legacy bool
+}
+
+// Activate the bundle(s) by loading into the given Store. This will load policies, data, and record
+// the manifest in storage. The compiler provided will have had the policies compiled on it.
+func Activate(opts *ActivateOpts) error {
+	opts.legacy = false
+	return activateBundles(opts)
+}
+
+// DeactivateOpts defines options for the Deactivate API call
+type DeactivateOpts struct {
+	Ctx           context.Context
+	Store         storage.Store
+	Txn           storage.Transaction
+	BundleNames   map[string]struct{}
+	ParserOptions ast.ParserOptions
+}
+
+// Deactivate the bundle(s). This will erase associated data, policies, and the manifest entry from the store.
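+// A typical call happens inside a write transaction, roughly like the
+// following (sketch only; store setup and commit/abort handling elided):
+//
+//	txn, _ := store.NewTransaction(ctx, storage.WriteParams)
+//	_ = bundle.Deactivate(&bundle.DeactivateOpts{
+//		Ctx:         ctx,
+//		Store:       store,
+//		Txn:         txn,
+//		BundleNames: map[string]struct{}{"b1": {}},
+//	})
+//	_ = store.Commit(ctx, txn)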
+func Deactivate(opts *DeactivateOpts) error { + erase := map[string]struct{}{} + for name := range opts.BundleNames { + roots, err := ReadBundleRootsFromStore(opts.Ctx, opts.Store, opts.Txn, name) + if suppressNotFound(err) != nil { + return err + } + for _, root := range roots { + erase[root] = struct{}{} + } + } + _, err := eraseBundles(opts.Ctx, opts.Store, opts.Txn, opts.ParserOptions, opts.BundleNames, erase) + return err +} + +func activateBundles(opts *ActivateOpts) error { + + // Build collections of bundle names, modules, and roots to erase + erase := map[string]struct{}{} + names := map[string]struct{}{} + deltaBundles := map[string]*Bundle{} + snapshotBundles := map[string]*Bundle{} + + for name, b := range opts.Bundles { + if b.Type() == DeltaBundleType { + deltaBundles[name] = b + } else { + snapshotBundles[name] = b + names[name] = struct{}{} + + roots, err := ReadBundleRootsFromStore(opts.Ctx, opts.Store, opts.Txn, name) + if suppressNotFound(err) != nil { + return err + } + for _, root := range roots { + erase[root] = struct{}{} + } + + // Erase data at new roots to prepare for writing the new data + for _, root := range *b.Manifest.Roots { + erase[root] = struct{}{} + } + } + } + + // Before changing anything make sure the roots don't collide with any + // other bundles that already are activated or other bundles being activated. + err := hasRootsOverlap(opts.Ctx, opts.Store, opts.Txn, opts.Bundles) + if err != nil { + return err + } + + if len(deltaBundles) != 0 { + err := activateDeltaBundles(opts, deltaBundles) + if err != nil { + return err + } + } + + // Erase data and policies at new + old roots, and remove the old + // manifests before activating a new snapshot bundle. + remaining, err := eraseBundles(opts.Ctx, opts.Store, opts.Txn, opts.ParserOptions, names, erase) + if err != nil { + return err + } + + // Validate data in bundle does not contain paths outside the bundle's roots. + for _, b := range snapshotBundles { + + if b.lazyLoadingMode { + + for _, item := range b.Raw { + path := filepath.ToSlash(item.Path) + + if filepath.Base(path) == dataFile || filepath.Base(path) == yamlDataFile { + var val map[string]json.RawMessage + err = util.Unmarshal(item.Value, &val) + if err == nil { + err = doDFS(val, filepath.Dir(strings.Trim(path, "/")), *b.Manifest.Roots) + if err != nil { + return err + } + } else { + // Build an object for the value + p := getNormalizedPath(path) + + if len(p) == 0 { + return errors.New("root value must be object") + } + + // verify valid YAML or JSON value + var x interface{} + err := util.Unmarshal(item.Value, &x) + if err != nil { + return err + } + + value := item.Value + dir := map[string]json.RawMessage{} + for i := len(p) - 1; i > 0; i-- { + dir[p[i]] = value + + bs, err := json.Marshal(dir) + if err != nil { + return err + } + + value = bs + dir = map[string]json.RawMessage{} + } + dir[p[0]] = value + + err = doDFS(dir, filepath.Dir(strings.Trim(path, "/")), *b.Manifest.Roots) + if err != nil { + return err + } + } + } + } + } + } + + // Compile the modules all at once to avoid having to re-do work. 
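+	// "remaining" holds the modules that survived erasure in the store;
+	// merging them with opts.ExtraModules gives the compiler the complete
+	// set of modules that will coexist with the new snapshot bundles.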
+ remainingAndExtra := make(map[string]*ast.Module) + for name, mod := range remaining { + remainingAndExtra[name] = mod + } + for name, mod := range opts.ExtraModules { + remainingAndExtra[name] = mod + } + + err = compileModules(opts.Compiler, opts.Metrics, snapshotBundles, remainingAndExtra, opts.legacy, opts.AuthorizationDecisionRef) + if err != nil { + return err + } + + if err := writeDataAndModules(opts.Ctx, opts.Store, opts.Txn, opts.TxnCtx, snapshotBundles, opts.legacy, opts.ParserOptions.RegoVersion); err != nil { + return err + } + + if err := ast.CheckPathConflicts(opts.Compiler, storage.NonEmpty(opts.Ctx, opts.Store, opts.Txn)); len(err) > 0 { + return err + } + + for name, b := range snapshotBundles { + if err := writeManifestToStore(opts, name, b.Manifest); err != nil { + return err + } + + if err := writeEtagToStore(opts, name, b.Etag); err != nil { + return err + } + + if err := writeWasmModulesToStore(opts.Ctx, opts.Store, opts.Txn, name, b); err != nil { + return err + } + } + + return nil +} + +func doDFS(obj map[string]json.RawMessage, path string, roots []string) error { + if len(roots) == 1 && roots[0] == "" { + return nil + } + + for key := range obj { + + newPath := filepath.Join(strings.Trim(path, "/"), key) + + // Note: filepath.Join can return paths with '\' separators, always use + // filepath.ToSlash to keep them normalized. + newPath = strings.TrimLeft(normalizePath(newPath), "/.") + + contains := false + prefix := false + if RootPathsContain(roots, newPath) { + contains = true + } else { + for i := range roots { + if strings.HasPrefix(strings.Trim(roots[i], "/"), newPath) { + prefix = true + break + } + } + } + + if !contains && !prefix { + return fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, newPath) + } + + if contains { + continue + } + + var next map[string]json.RawMessage + err := util.Unmarshal(obj[key], &next) + if err != nil { + return fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, newPath) + } + + if err := doDFS(next, newPath, roots); err != nil { + return err + } + } + return nil +} + +func activateDeltaBundles(opts *ActivateOpts, bundles map[string]*Bundle) error { + + // Check that the manifest roots and wasm resolvers in the delta bundle + // match with those currently in the store + for name, b := range bundles { + value, err := opts.Store.Read(opts.Ctx, opts.Txn, ManifestStoragePath(name)) + if err != nil { + if storage.IsNotFound(err) { + continue + } + return err + } + + manifest, err := valueToManifest(value) + if err != nil { + return fmt.Errorf("corrupt manifest data: %w", err) + } + + if !b.Manifest.equalWasmResolversAndRoots(manifest) { + return fmt.Errorf("delta bundle '%s' has wasm resolvers or manifest roots that are different from those in the store", name) + } + } + + for _, b := range bundles { + err := applyPatches(opts.Ctx, opts.Store, opts.Txn, b.Patch.Data) + if err != nil { + return err + } + } + + if err := ast.CheckPathConflicts(opts.Compiler, storage.NonEmpty(opts.Ctx, opts.Store, opts.Txn)); len(err) > 0 { + return err + } + + for name, b := range bundles { + if err := writeManifestToStore(opts, name, b.Manifest); err != nil { + return err + } + + if err := writeEtagToStore(opts, name, b.Etag); err != nil { + return err + } + } + + return nil +} + +func valueToManifest(v interface{}) (Manifest, error) { + if astV, ok := v.(ast.Value); ok { + var err error + v, err = ast.JSON(astV) + if err 
!= nil { + return Manifest{}, err + } + } + + var manifest Manifest + + bs, err := json.Marshal(v) + if err != nil { + return Manifest{}, err + } + + err = util.UnmarshalJSON(bs, &manifest) + if err != nil { + return Manifest{}, err + } + + return manifest, nil +} + +// erase bundles by name and roots. This will clear all policies and data at its roots and remove its +// manifest from storage. +func eraseBundles(ctx context.Context, store storage.Store, txn storage.Transaction, parserOpts ast.ParserOptions, names map[string]struct{}, roots map[string]struct{}) (map[string]*ast.Module, error) { + + if err := eraseData(ctx, store, txn, roots); err != nil { + return nil, err + } + + remaining, removed, err := erasePolicies(ctx, store, txn, parserOpts, roots) + if err != nil { + return nil, err + } + + for name := range names { + if err := EraseManifestFromStore(ctx, store, txn, name); suppressNotFound(err) != nil { + return nil, err + } + + if err := LegacyEraseManifestFromStore(ctx, store, txn); suppressNotFound(err) != nil { + return nil, err + } + + if err := eraseBundleEtagFromStore(ctx, store, txn, name); suppressNotFound(err) != nil { + return nil, err + } + + if err := eraseWasmModulesFromStore(ctx, store, txn, name); suppressNotFound(err) != nil { + return nil, err + } + } + + err = eraseModuleRegoVersionsFromStore(ctx, store, txn, removed) + if err != nil { + return nil, err + } + + return remaining, nil +} + +func eraseData(ctx context.Context, store storage.Store, txn storage.Transaction, roots map[string]struct{}) error { + for root := range roots { + path, ok := storage.ParsePathEscaped("/" + root) + if !ok { + return fmt.Errorf("manifest root path invalid: %v", root) + } + + if len(path) > 0 { + if err := store.Write(ctx, txn, storage.RemoveOp, path, nil); suppressNotFound(err) != nil { + return err + } + } + } + return nil +} + +type moduleInfo struct { + RegoVersion ast.RegoVersion `json:"rego_version"` +} + +func readModuleInfoFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) (map[string]moduleInfo, error) { + value, err := read(ctx, store, txn, ModulesInfoBasePath) + if suppressNotFound(err) != nil { + return nil, err + } + + if value == nil { + return nil, nil + } + + if m, ok := value.(map[string]any); ok { + versions := make(map[string]moduleInfo, len(m)) + + for k, v := range m { + if m0, ok := v.(map[string]any); ok { + if ver, ok := m0["rego_version"]; ok { + if vs, ok := ver.(json.Number); ok { + i, err := vs.Int64() + if err != nil { + return nil, errors.New("corrupt rego version") + } + versions[k] = moduleInfo{RegoVersion: ast.RegoVersionFromInt(int(i))} + } + } + } + } + return versions, nil + } + + return nil, errors.New("corrupt rego version") +} + +func erasePolicies(ctx context.Context, store storage.Store, txn storage.Transaction, parserOpts ast.ParserOptions, roots map[string]struct{}) (map[string]*ast.Module, []string, error) { + + ids, err := store.ListPolicies(ctx, txn) + if err != nil { + return nil, nil, err + } + + modulesInfo, err := readModuleInfoFromStore(ctx, store, txn) + if err != nil { + return nil, nil, fmt.Errorf("failed to read module info from store: %w", err) + } + + getRegoVersion := func(modId string) (ast.RegoVersion, bool) { + info, ok := modulesInfo[modId] + if !ok { + return ast.RegoUndefined, false + } + return info.RegoVersion, true + } + + remaining := map[string]*ast.Module{} + var removed []string + + for _, id := range ids { + bs, err := store.GetPolicy(ctx, txn, id) + if err != nil { + return nil, 
nil, err + } + + parserOptsCpy := parserOpts + if regoVersion, ok := getRegoVersion(id); ok { + parserOptsCpy.RegoVersion = regoVersion + } + + module, err := ast.ParseModuleWithOpts(id, string(bs), parserOptsCpy) + if err != nil { + return nil, nil, err + } + path, err := module.Package.Path.Ptr() + if err != nil { + return nil, nil, err + } + deleted := false + for root := range roots { + if RootPathsContain([]string{root}, path) { + if err := store.DeletePolicy(ctx, txn, id); err != nil { + return nil, nil, err + } + deleted = true + break + } + } + + if deleted { + removed = append(removed, id) + } else { + remaining[id] = module + } + } + + return remaining, removed, nil +} + +func writeManifestToStore(opts *ActivateOpts, name string, manifest Manifest) error { + // Always write manifests to the named location. If the plugin is in the older style config + // then also write to the old legacy unnamed location. + if err := WriteManifestToStore(opts.Ctx, opts.Store, opts.Txn, name, manifest); err != nil { + return err + } + + if opts.legacy { + if err := LegacyWriteManifestToStore(opts.Ctx, opts.Store, opts.Txn, manifest); err != nil { + return err + } + } + + return nil +} + +func writeEtagToStore(opts *ActivateOpts, name, etag string) error { + if err := WriteEtagToStore(opts.Ctx, opts.Store, opts.Txn, name, etag); err != nil { + return err + } + + return nil +} + +func writeModuleRegoVersionToStore(ctx context.Context, store storage.Store, txn storage.Transaction, b *Bundle, + mf ModuleFile, storagePath string, runtimeRegoVersion ast.RegoVersion) error { + + var regoVersion ast.RegoVersion + if mf.Parsed != nil { + regoVersion = mf.Parsed.RegoVersion() + } + + if regoVersion == ast.RegoUndefined { + var err error + regoVersion, err = b.RegoVersionForFile(mf.Path, runtimeRegoVersion) + if err != nil { + return fmt.Errorf("failed to get rego version for module '%s' in bundle: %w", mf.Path, err) + } + } + + if regoVersion != ast.RegoUndefined && regoVersion != runtimeRegoVersion { + if err := write(ctx, store, txn, moduleRegoVersionPath(storagePath), regoVersion.Int()); err != nil { + return fmt.Errorf("failed to write rego version for module '%s': %w", storagePath, err) + } + } + return nil +} + +func writeDataAndModules(ctx context.Context, store storage.Store, txn storage.Transaction, txnCtx *storage.Context, bundles map[string]*Bundle, legacy bool, runtimeRegoVersion ast.RegoVersion) error { + params := storage.WriteParams + params.Context = txnCtx + + for name, b := range bundles { + if len(b.Raw) == 0 { + // Write data from each new bundle into the store. Only write under the + // roots contained in their manifest. + if err := writeData(ctx, store, txn, *b.Manifest.Roots, b.Data); err != nil { + return err + } + + for _, mf := range b.Modules { + var path string + + // For backwards compatibility, in legacy mode, upsert policies to + // the unprefixed path. 
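+				// (In non-legacy mode the policy ID is namespaced by bundle name
+				// via modulePathWithPrefix, so same-named modules in different
+				// bundles cannot collide.)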
+				if legacy {
+					path = mf.Path
+				} else {
+					path = modulePathWithPrefix(name, mf.Path)
+				}
+
+				if err := store.UpsertPolicy(ctx, txn, path, mf.Raw); err != nil {
+					return err
+				}
+
+				if err := writeModuleRegoVersionToStore(ctx, store, txn, b, mf, path, runtimeRegoVersion); err != nil {
+					return err
+				}
+			}
+		} else {
+			params.BasePaths = *b.Manifest.Roots
+
+			err := store.Truncate(ctx, txn, params, NewIterator(b.Raw))
+			if err != nil {
+				return fmt.Errorf("store truncate failed for bundle '%s': %v", name, err)
+			}
+
+			for _, f := range b.Raw {
+				if strings.HasSuffix(f.Path, RegoExt) {
+					p, err := getFileStoragePath(f.Path)
+					if err != nil {
+						return fmt.Errorf("failed to get storage path for module '%s' in bundle '%s': %w", f.Path, name, err)
+					}
+
+					if m := f.module; m != nil {
+						// 'f.module.Path' contains the module's path as it relates to the bundle root, and can be used for looking up the rego-version.
+						// 'f.Path' can differ, based on how the bundle reader was initialized.
+						if err := writeModuleRegoVersionToStore(ctx, store, txn, b, *m, p.String(), runtimeRegoVersion); err != nil {
+							return err
+						}
+					}
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+func writeData(ctx context.Context, store storage.Store, txn storage.Transaction, roots []string, data map[string]interface{}) error {
+	for _, root := range roots {
+		path, ok := storage.ParsePathEscaped("/" + root)
+		if !ok {
+			return fmt.Errorf("manifest root path invalid: %v", root)
+		}
+		if value, ok := lookup(path, data); ok {
+			if len(path) > 0 {
+				if err := storage.MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil {
+					return err
+				}
+			}
+			if err := store.Write(ctx, txn, storage.AddOp, path, value); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func compileModules(compiler *ast.Compiler, m metrics.Metrics, bundles map[string]*Bundle, extraModules map[string]*ast.Module, legacy bool, authorizationDecisionRef ast.Ref) error {
+
+	m.Timer(metrics.RegoModuleCompile).Start()
+	defer m.Timer(metrics.RegoModuleCompile).Stop()
+
+	modules := map[string]*ast.Module{}
+
+	// preserve any modules already on the compiler
+	for name, module := range compiler.Modules {
+		modules[name] = module
+	}
+
+	// preserve any modules passed in from the store
+	for name, module := range extraModules {
+		modules[name] = module
+	}
+
+	// include all the new bundle modules
+	for bundleName, b := range bundles {
+		if legacy {
+			for _, mf := range b.Modules {
+				modules[mf.Path] = mf.Parsed
+			}
+		} else {
+			for name, module := range b.ParsedModules(bundleName) {
+				modules[name] = module
+			}
+		}
+	}
+
+	if compiler.Compile(modules); compiler.Failed() {
+		return compiler.Errors
+	}
+
+	if authorizationDecisionRef.Equal(ast.EmptyRef()) {
+		return nil
+	}
+
+	return iCompiler.VerifyAuthorizationPolicySchema(compiler, authorizationDecisionRef)
+}
+
+func writeModules(ctx context.Context, store storage.Store, txn storage.Transaction, compiler *ast.Compiler, m metrics.Metrics, bundles map[string]*Bundle, extraModules map[string]*ast.Module, legacy bool) error {
+
+	m.Timer(metrics.RegoModuleCompile).Start()
+	defer m.Timer(metrics.RegoModuleCompile).Stop()
+
+	modules := map[string]*ast.Module{}
+
+	// preserve any modules already on the compiler
+	for name, module := range compiler.Modules {
+		modules[name] = module
+	}
+
+	// preserve any modules passed in from the store
+	for name, module := range extraModules {
+		modules[name] = module
+	}
+
+	// include all the new bundle modules
+	for bundleName, b := range bundles {
+		if legacy {
+			for _, mf := range 
b.Modules { + modules[mf.Path] = mf.Parsed + } + } else { + for name, module := range b.ParsedModules(bundleName) { + modules[name] = module + } + } + } + + if compiler.Compile(modules); compiler.Failed() { + return compiler.Errors + } + for bundleName, b := range bundles { + for _, mf := range b.Modules { + var path string + + // For backwards compatibility, in legacy mode, upsert policies to + // the unprefixed path. + if legacy { + path = mf.Path + } else { + path = modulePathWithPrefix(bundleName, mf.Path) + } + + if err := store.UpsertPolicy(ctx, txn, path, mf.Raw); err != nil { + return err + } + } + } + return nil +} + +func lookup(path storage.Path, data map[string]interface{}) (interface{}, bool) { + if len(path) == 0 { + return data, true + } + for i := range len(path) - 1 { + value, ok := data[path[i]] + if !ok { + return nil, false + } + obj, ok := value.(map[string]interface{}) + if !ok { + return nil, false + } + data = obj + } + value, ok := data[path[len(path)-1]] + return value, ok +} + +func hasRootsOverlap(ctx context.Context, store storage.Store, txn storage.Transaction, bundles map[string]*Bundle) error { + collisions := map[string][]string{} + allBundles, err := ReadBundleNamesFromStore(ctx, store, txn) + if suppressNotFound(err) != nil { + return err + } + + allRoots := map[string][]string{} + + // Build a map of roots for existing bundles already in the system + for _, name := range allBundles { + roots, err := ReadBundleRootsFromStore(ctx, store, txn, name) + if suppressNotFound(err) != nil { + return err + } + allRoots[name] = roots + } + + // Add in any bundles that are being activated, overwrite existing roots + // with new ones where bundles are in both groups. + for name, bundle := range bundles { + allRoots[name] = *bundle.Manifest.Roots + } + + // Now check for each new bundle if it conflicts with any of the others + for name, bundle := range bundles { + for otherBundle, otherRoots := range allRoots { + if name == otherBundle { + // Skip the current bundle being checked + continue + } + + // Compare the "new" roots with other existing (or a different bundles new roots) + for _, newRoot := range *bundle.Manifest.Roots { + for _, otherRoot := range otherRoots { + if RootPathsOverlap(newRoot, otherRoot) { + collisions[otherBundle] = append(collisions[otherBundle], newRoot) + } + } + } + } + } + + if len(collisions) > 0 { + var bundleNames []string + for name := range collisions { + bundleNames = append(bundleNames, name) + } + return fmt.Errorf("detected overlapping roots in bundle manifest with: %s", bundleNames) + } + return nil +} + +func applyPatches(ctx context.Context, store storage.Store, txn storage.Transaction, patches []PatchOperation) error { + for _, pat := range patches { + + // construct patch path + path, ok := patch.ParsePatchPathEscaped("/" + strings.Trim(pat.Path, "/")) + if !ok { + return errors.New("error parsing patch path") + } + + var op storage.PatchOp + switch pat.Op { + case "upsert": + op = storage.AddOp + + _, err := store.Read(ctx, txn, path[:len(path)-1]) + if err != nil { + if !storage.IsNotFound(err) { + return err + } + + if err := storage.MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil { + return err + } + } + case "remove": + op = storage.RemoveOp + case "replace": + op = storage.ReplaceOp + default: + return fmt.Errorf("bad patch operation: %v", pat.Op) + } + + // apply the patch + if err := store.Write(ctx, txn, op, path, pat.Value); err != nil { + return err + } + } + + return nil +} + +// Helpers for the older 
single (unnamed) bundle style manifest storage.
+
+// legacyManifestStoragePath is the older unnamed bundle path for manifests to be stored.
+// Deprecated: Use ManifestStoragePath and named bundles instead.
+var legacyManifestStoragePath = storage.MustParsePath("/system/bundle/manifest")
+var legacyRevisionStoragePath = append(legacyManifestStoragePath, "revision")
+
+// LegacyWriteManifestToStore will write the bundle manifest to the older single (unnamed) bundle manifest location.
+// Deprecated: Use WriteManifestToStore and named bundles instead.
+func LegacyWriteManifestToStore(ctx context.Context, store storage.Store, txn storage.Transaction, manifest Manifest) error {
+	return write(ctx, store, txn, legacyManifestStoragePath, manifest)
+}
+
+// LegacyEraseManifestFromStore will erase the bundle manifest from the older single (unnamed) bundle manifest location.
+// Deprecated: Use EraseManifestFromStore and named bundles instead.
+func LegacyEraseManifestFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) error {
+	err := store.Write(ctx, txn, storage.RemoveOp, legacyManifestStoragePath, nil)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// LegacyReadRevisionFromStore will read the bundle manifest revision from the older single (unnamed) bundle manifest location.
+// Deprecated: Use ReadBundleRevisionFromStore and named bundles instead.
+func LegacyReadRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) (string, error) {
+	return readRevisionFromStore(ctx, store, txn, legacyRevisionStoragePath)
+}
+
+// ActivateLegacy calls Activate for the bundles but will also write their manifest to the older unnamed store location.
+// Deprecated: Use Activate with named bundles instead.
+func ActivateLegacy(opts *ActivateOpts) error {
+	opts.legacy = true
+	return activateBundles(opts)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/verify.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/verify.go
new file mode 100644
index 00000000000..0645d3aafb8
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/verify.go
@@ -0,0 +1,232 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package bundle provides helpers that assist in the bundle signature verification process
+package bundle
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+	"fmt"
+
+	"github.com/open-policy-agent/opa/internal/jwx/jwa"
+	"github.com/open-policy-agent/opa/internal/jwx/jws"
+	"github.com/open-policy-agent/opa/internal/jwx/jws/verify"
+	"github.com/open-policy-agent/opa/v1/util"
+)
+
+const defaultVerifierID = "_default"
+
+var verifiers map[string]Verifier
+
+// Verifier is the interface expected for implementations that verify bundle signatures.
+type Verifier interface {
+	VerifyBundleSignature(SignaturesConfig, *VerificationConfig) (map[string]FileInfo, error)
+}
+
+// VerifyBundleSignature will retrieve the Verifier implementation based
+// on the Plugin specified in SignaturesConfig, and call its implementation
+// of VerifyBundleSignature. VerifyBundleSignature verifies the bundle signature
+// using the given public keys or secret.
If a signature is verified, it keeps +// track of the files specified in the JWT payload +func VerifyBundleSignature(sc SignaturesConfig, bvc *VerificationConfig) (map[string]FileInfo, error) { + // default implementation does not return a nil for map, so don't + // do it here either + files := make(map[string]FileInfo) + var plugin string + // for backwards compatibility, check if there is no plugin specified, and use default + if sc.Plugin == "" { + plugin = defaultVerifierID + } else { + plugin = sc.Plugin + } + verifier, err := GetVerifier(plugin) + if err != nil { + return files, err + } + return verifier.VerifyBundleSignature(sc, bvc) +} + +// DefaultVerifier is the default bundle verification implementation. It verifies bundles by checking +// the JWT signature using a locally-accessible public key. +type DefaultVerifier struct{} + +// VerifyBundleSignature verifies the bundle signature using the given public keys or secret. +// If a signature is verified, it keeps track of the files specified in the JWT payload +func (*DefaultVerifier) VerifyBundleSignature(sc SignaturesConfig, bvc *VerificationConfig) (map[string]FileInfo, error) { + files := make(map[string]FileInfo) + + if len(sc.Signatures) == 0 { + return files, errors.New(".signatures.json: missing JWT (expected exactly one)") + } + + if len(sc.Signatures) > 1 { + return files, errors.New(".signatures.json: multiple JWTs not supported (expected exactly one)") + } + + for _, token := range sc.Signatures { + payload, err := verifyJWTSignature(token, bvc) + if err != nil { + return files, err + } + + for _, file := range payload.Files { + files[file.Name] = file + } + } + return files, nil +} + +func verifyJWTSignature(token string, bvc *VerificationConfig) (*DecodedSignature, error) { + // decode JWT to check if the header specifies the key to use and/or if claims have the scope. + + parts, err := jws.SplitCompact(token) + if err != nil { + return nil, err + } + + var decodedHeader []byte + if decodedHeader, err = base64.RawURLEncoding.DecodeString(parts[0]); err != nil { + return nil, fmt.Errorf("failed to base64 decode JWT headers: %w", err) + } + + var hdr jws.StandardHeaders + if err := json.Unmarshal(decodedHeader, &hdr); err != nil { + return nil, fmt.Errorf("failed to parse JWT headers: %w", err) + } + + payload, err := base64.RawURLEncoding.DecodeString(parts[1]) + if err != nil { + return nil, err + } + + var ds DecodedSignature + if err := json.Unmarshal(payload, &ds); err != nil { + return nil, err + } + + // check for the id of the key to use for JWT signature verification + // first in the OPA config. If not found, then check the JWT kid. + keyID := bvc.KeyID + if keyID == "" { + keyID = hdr.KeyID + } + if keyID == "" { + // If header has no key id, check the deprecated key claim. 
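+		// (Editor's note: the effective resolution order is the config-level
+		// KeyID, then the JWT "kid" header, then this deprecated claim carried
+		// in the signature payload itself.)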
+ keyID = ds.KeyID + } + + if keyID == "" { + return nil, errors.New("verification key ID is empty") + } + + // now that we have the keyID, fetch the actual key + keyConfig, err := bvc.GetPublicKey(keyID) + if err != nil { + return nil, err + } + + // verify JWT signature + alg := jwa.SignatureAlgorithm(keyConfig.Algorithm) + key, err := verify.GetSigningKey(keyConfig.Key, alg) + if err != nil { + return nil, err + } + + _, err = jws.Verify([]byte(token), alg, key) + if err != nil { + return nil, err + } + + // verify the scope + scope := bvc.Scope + if scope == "" { + scope = keyConfig.Scope + } + + if ds.Scope != scope { + return nil, errors.New("scope mismatch") + } + return &ds, nil +} + +// VerifyBundleFile verifies the hash of a file in the bundle matches to that provided in the bundle's signature +func VerifyBundleFile(path string, data bytes.Buffer, files map[string]FileInfo) error { + var file FileInfo + var ok bool + + if file, ok = files[path]; !ok { + return fmt.Errorf("file %v not included in bundle signature", path) + } + + if file.Algorithm == "" { + return fmt.Errorf("no hashing algorithm provided for file %v", path) + } + + hash, err := NewSignatureHasher(HashingAlgorithm(file.Algorithm)) + if err != nil { + return err + } + + // hash the file content + // For unstructured files, hash the byte stream of the file + // For structured files, read the byte stream and parse into a JSON structure; + // then recursively order the fields of all objects alphabetically and then apply + // the hash function to result to compute the hash. This ensures that the digital signature is + // independent of whitespace and other non-semantic JSON features. + var value interface{} + if IsStructuredDoc(path) { + err := util.Unmarshal(data.Bytes(), &value) + if err != nil { + return err + } + } else { + value = data.Bytes() + } + + bs, err := hash.HashFile(value) + if err != nil { + return err + } + + // compare file hash with same file in the JWT payloads + fb, err := hex.DecodeString(file.Hash) + if err != nil { + return err + } + + if !bytes.Equal(fb, bs) { + return fmt.Errorf("%v: digest mismatch (want: %x, got: %x)", path, fb, bs) + } + + delete(files, path) + return nil +} + +// GetVerifier returns the Verifier registered under the given id +func GetVerifier(id string) (Verifier, error) { + verifier, ok := verifiers[id] + if !ok { + return nil, fmt.Errorf("no verifier exists under id %s", id) + } + return verifier, nil +} + +// RegisterVerifier registers a Verifier under the given id +func RegisterVerifier(id string, v Verifier) error { + if id == defaultVerifierID { + return fmt.Errorf("verifier id %s is reserved, use a different id", id) + } + verifiers[id] = v + return nil +} + +func init() { + verifiers = map[string]Verifier{ + defaultVerifierID: &DefaultVerifier{}, + } +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/capabilities/capabilities.go b/vendor/github.com/open-policy-agent/opa/v1/capabilities/capabilities.go new file mode 100644 index 00000000000..5b0bb1ea523 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/capabilities/capabilities.go @@ -0,0 +1,18 @@ +// Copyright 2021 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +//go:build go1.16 +// +build go1.16 + +package capabilities + +import ( + v0 "github.com/open-policy-agent/opa/capabilities" +) + +// FS contains the embedded capabilities/ directory of the built version, +// which has all the capabilities of previous versions: +// "v0.18.0.json" contains the capabilities JSON of version v0.18.0, etc + +var FS = v0.FS diff --git a/vendor/github.com/open-policy-agent/opa/config/config.go b/vendor/github.com/open-policy-agent/opa/v1/config/config.go similarity index 96% rename from vendor/github.com/open-policy-agent/opa/config/config.go rename to vendor/github.com/open-policy-agent/opa/v1/config/config.go index 87ab109113a..490f90b9052 100644 --- a/vendor/github.com/open-policy-agent/opa/config/config.go +++ b/vendor/github.com/open-policy-agent/opa/v1/config/config.go @@ -7,6 +7,7 @@ package config import ( "encoding/json" + "errors" "fmt" "os" "path/filepath" @@ -14,10 +15,10 @@ import ( "sort" "strings" - "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/ref" - "github.com/open-policy-agent/opa/util" - "github.com/open-policy-agent/opa/version" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/util" + "github.com/open-policy-agent/opa/v1/version" ) // Config represents the configuration file that OPA can be started with. @@ -98,7 +99,7 @@ func (c Config) PluginNames() (result []string) { // PluginsEnabled returns true if one or more plugin features are enabled. // -// Deprecated. Use PluginNames instead. +// Deprecated: Use PluginNames instead. func (c Config) PluginsEnabled() bool { return c.Bundle != nil || c.Bundles != nil || c.DecisionLogs != nil || c.Status != nil || len(c.Plugins) > 0 } @@ -243,7 +244,7 @@ func removeCryptoKeys(x interface{}) error { func removeKey(x interface{}, keys ...string) error { val, ok := x.(map[string]interface{}) if !ok { - return fmt.Errorf("type assertion error") + return errors.New("type assertion error") } for _, key := range keys { diff --git a/vendor/github.com/open-policy-agent/opa/format/format.go b/vendor/github.com/open-policy-agent/opa/v1/format/format.go similarity index 56% rename from vendor/github.com/open-policy-agent/opa/format/format.go rename to vendor/github.com/open-policy-agent/opa/v1/format/format.go index 43e55946693..2b0f2af15a2 100644 --- a/vendor/github.com/open-policy-agent/opa/format/format.go +++ b/vendor/github.com/open-policy-agent/opa/v1/format/format.go @@ -7,15 +7,17 @@ package format import ( "bytes" + "errors" "fmt" "regexp" + "slices" "sort" "strings" "unicode" - "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/future" - "github.com/open-policy-agent/opa/types" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/types" ) // Opts lets you control the code formatting via `AstWithOpts()`. @@ -31,6 +33,17 @@ type Opts struct { // ParserOptions is the parser options used when parsing the module to be formatted. ParserOptions *ast.ParserOptions + + // DropV0Imports instructs the formatter to drop all v0 imports from the module; i.e. 'rego.v1' and 'future.keywords' imports. + // Imports are only removed if [Opts.RegoVersion] makes them redundant. 
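+	// For example (editor's note): with RegoVersion set to ast.RegoV1, an
+	// 'import rego.v1' is redundant and is dropped when this flag is set.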
+ DropV0Imports bool +} + +func (o Opts) effectiveRegoVersion() ast.RegoVersion { + if o.RegoVersion == ast.RegoUndefined { + return ast.DefaultRegoVersion + } + return o.RegoVersion } // defaultLocationFile is the file name used in `Ast()` for terms @@ -46,15 +59,19 @@ func Source(filename string, src []byte) ([]byte, error) { } func SourceWithOpts(filename string, src []byte, opts Opts) ([]byte, error) { + regoVersion := opts.effectiveRegoVersion() + var parserOpts ast.ParserOptions if opts.ParserOptions != nil { parserOpts = *opts.ParserOptions - } else { - if opts.RegoVersion == ast.RegoV1 { - // If the rego version is V1, we need to parse it as such, to allow for future keywords not being imported. - // Otherwise, we'll default to the default rego-version. - parserOpts.RegoVersion = ast.RegoV1 - } + } else if regoVersion == ast.RegoV1 { + // If the rego version is V1, we need to parse it as such, to allow for future keywords not being imported. + // Otherwise, we'll default to the default rego-version. + parserOpts.RegoVersion = ast.RegoV1 + } + + if parserOpts.RegoVersion == ast.RegoUndefined { + parserOpts.RegoVersion = ast.DefaultRegoVersion } module, err := ast.ParseModuleWithOpts(filename, string(src), parserOpts) @@ -62,15 +79,15 @@ func SourceWithOpts(filename string, src []byte, opts Opts) ([]byte, error) { return nil, err } - if opts.RegoVersion == ast.RegoV0CompatV1 || opts.RegoVersion == ast.RegoV1 { + if regoVersion == ast.RegoV0CompatV1 || regoVersion == ast.RegoV1 { checkOpts := ast.NewRegoCheckOptions() // The module is parsed as v0, so we need to disable checks that will be automatically amended by the AstWithOpts call anyways. checkOpts.RequireIfKeyword = false checkOpts.RequireContainsKeyword = false checkOpts.RequireRuleBodyOrValue = false - errors := ast.CheckRegoV1WithOptions(module, checkOpts) - if len(errors) > 0 { - return nil, errors + errs := ast.CheckRegoV1WithOptions(module, checkOpts) + if len(errs) > 0 { + return nil, errs } } @@ -83,7 +100,7 @@ func SourceWithOpts(filename string, src []byte, opts Opts) ([]byte, error) { } // MustAst is a helper function to format a Rego AST element. If any errors -// occurs this function will panic. This is mostly used for test +// occur this function will panic. This is mostly used for test func MustAst(x interface{}) []byte { bs, err := Ast(x) if err != nil { @@ -93,7 +110,7 @@ func MustAst(x interface{}) []byte { } // MustAstWithOpts is a helper function to format a Rego AST element. If any errors -// occurs this function will panic. This is mostly used for test +// occur this function will panic. This is mostly used for test func MustAstWithOpts(x interface{}, opts Opts) []byte { bs, err := AstWithOpts(x, opts) if err != nil { @@ -127,6 +144,7 @@ type fmtOpts struct { refHeads bool regoV1 bool + regoV1Imported bool futureKeywords []string } @@ -154,12 +172,16 @@ func AstWithOpts(x interface{}, opts Opts) ([]byte, error) { o := fmtOpts{} - if opts.RegoVersion == ast.RegoV0CompatV1 || opts.RegoVersion == ast.RegoV1 { + regoVersion := opts.effectiveRegoVersion() + if regoVersion == ast.RegoV0CompatV1 || regoVersion == ast.RegoV1 { o.regoV1 = true o.ifs = true o.contains = true } + memberRef := ast.Member.Ref() + memberWithKeyRef := ast.MemberWithKey.Ref() + // Preprocess the AST. Set any required defaults and calculate // values required for printing the formatted output. 
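	// (Editor's note: the walk below records which future keywords the module
	// needs, e.g. 'in' for member calls and 'every', and which rego.v1 or
	// future.keywords imports are already present, so the import list can be
	// adjusted before printing.)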
ast.WalkNodes(x, func(x ast.Node) bool { @@ -173,7 +195,7 @@ func AstWithOpts(x interface{}, opts Opts) ([]byte, error) { case *ast.Expr: switch { - case n.IsCall() && ast.Member.Ref().Equal(n.Operator()) || ast.MemberWithKey.Ref().Equal(n.Operator()): + case n.IsCall() && memberRef.Equal(n.Operator()) || memberWithKeyRef.Equal(n.Operator()): extraFutureKeywordImports["in"] = struct{}{} case n.IsEvery(): extraFutureKeywordImports["every"] = struct{}{} @@ -186,6 +208,7 @@ func AstWithOpts(x interface{}, opts Opts) ([]byte, error) { switch { case isRegoV1Compatible(n): + o.regoV1Imported = true o.contains = true o.ifs = true case future.IsAllFutureKeywords(n): @@ -220,43 +243,83 @@ func AstWithOpts(x interface{}, opts Opts) ([]byte, error) { switch x := x.(type) { case *ast.Module: - if opts.RegoVersion == ast.RegoV1 { + if regoVersion == ast.RegoV1 && opts.DropV0Imports { x.Imports = filterRegoV1Import(x.Imports) - } else if opts.RegoVersion == ast.RegoV0CompatV1 { + } else if regoVersion == ast.RegoV0CompatV1 { x.Imports = ensureRegoV1Import(x.Imports) } - if opts.RegoVersion == ast.RegoV0CompatV1 || opts.RegoVersion == ast.RegoV1 || moduleIsRegoV1Compatible(x) { - x.Imports = future.FilterFutureImports(x.Imports) + regoV1Imported := moduleIsRegoV1Compatible(x) + if regoVersion == ast.RegoV0CompatV1 || regoVersion == ast.RegoV1 || regoV1Imported { + if !opts.DropV0Imports && !regoV1Imported { + for _, kw := range o.futureKeywords { + x.Imports = ensureFutureKeywordImport(x.Imports, kw) + } + } else { + x.Imports = future.FilterFutureImports(x.Imports) + } } else { for kw := range extraFutureKeywordImports { x.Imports = ensureFutureKeywordImport(x.Imports, kw) } } - w.writeModule(x) + err := w.writeModule(x) + if err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, err.Error())) + } case *ast.Package: - w.writePackage(x, nil) + _, err := w.writePackage(x, nil) + if err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, err.Error())) + } case *ast.Import: - w.writeImports([]*ast.Import{x}, nil) + _, err := w.writeImports([]*ast.Import{x}, nil) + if err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, err.Error())) + } case *ast.Rule: - w.writeRule(x, false /* isElse */, nil) + _, err := w.writeRule(x, false /* isElse */, nil) + if err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, err.Error())) + } case *ast.Head: - w.writeHead(x, + _, err := w.writeHead(x, false, // isDefault false, // isExpandedConst nil) + if err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, err.Error())) + } case ast.Body: - w.writeBody(x, nil) + _, err := w.writeBody(x, nil) + if err != nil { + return nil, err + } case *ast.Expr: - w.writeExpr(x, nil) + _, err := w.writeExpr(x, nil) + if err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, err.Error())) + } case *ast.With: - w.writeWith(x, nil, false) + _, err := w.writeWith(x, nil, false) + if err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, err.Error())) + } case *ast.Term: - w.writeTerm(x, nil) + _, err := w.writeTerm(x, nil) + if err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, err.Error())) + } case ast.Value: - w.writeTerm(&ast.Term{Value: x, Location: &ast.Location{}}, nil) + _, err := w.writeTerm(&ast.Term{Value: x, Location: &ast.Location{}}, nil) + if err != nil { + w.errs = append(w.errs, 
ast.NewError(ast.FormatErr, &ast.Location{}, err.Error())) + } case *ast.Comment: - w.writeComments([]*ast.Comment{x}) + err := w.writeComments([]*ast.Comment{x}) + if err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, err.Error())) + } default: return nil, fmt.Errorf("not an ast element: %v", x) } @@ -309,16 +372,17 @@ func defaultLocation(x ast.Node) *ast.Location { type writer struct { buf bytes.Buffer - indent string - level int - inline bool - beforeEnd *ast.Comment - delay bool - errs ast.Errors - fmtOpts fmtOpts + indent string + level int + inline bool + beforeEnd *ast.Comment + delay bool + errs ast.Errors + fmtOpts fmtOpts + writeCommentOnFinalLine bool } -func (w *writer) writeModule(module *ast.Module) { +func (w *writer) writeModule(module *ast.Module) error { var pkg *ast.Package var others []interface{} var comments []*ast.Comment @@ -340,23 +404,41 @@ func (w *writer) writeModule(module *ast.Module) { visitor.Walk(module) sort.Slice(comments, func(i, j int) bool { - return locLess(comments[i], comments[j]) + l, err := locLess(comments[i], comments[j]) + if err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, err.Error())) + } + return l }) sort.Slice(others, func(i, j int) bool { - return locLess(others[i], others[j]) + l, err := locLess(others[i], others[j]) + if err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, err.Error())) + } + return l }) comments = trimTrailingWhitespaceInComments(comments) - comments = w.writePackage(pkg, comments) + var err error + comments, err = w.writePackage(pkg, comments) + if err != nil { + return err + } var imports []*ast.Import var rules []*ast.Rule for len(others) > 0 { imports, others = gatherImports(others) - comments = w.writeImports(imports, comments) + comments, err = w.writeImports(imports, comments) + if err != nil { + return err + } rules, others = gatherRules(others) - comments = w.writeRules(rules, comments) + comments, err = w.writeRules(rules, comments) + if err != nil { + return err + } } for i, c := range comments { @@ -365,6 +447,8 @@ func (w *writer) writeModule(module *ast.Module) { w.write("\n") } } + + return nil } func trimTrailingWhitespaceInComments(comments []*ast.Comment) []*ast.Comment { @@ -375,45 +459,97 @@ func trimTrailingWhitespaceInComments(comments []*ast.Comment) []*ast.Comment { return comments } -func (w *writer) writePackage(pkg *ast.Package, comments []*ast.Comment) []*ast.Comment { - comments = w.insertComments(comments, pkg.Location) +func (w *writer) writePackage(pkg *ast.Package, comments []*ast.Comment) ([]*ast.Comment, error) { + var err error + comments, err = w.insertComments(comments, pkg.Location) + if err != nil { + return nil, err + } w.startLine() // Omit head as all packages have the DefaultRootDocument prepended at parse time. 
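	// (Editor's note: pkg.Path[0] is the implicit "data" root document; the
	// printed path therefore starts at pkg.Path[1], re-emitted as a var term.)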
path := make(ast.Ref, len(pkg.Path)-1) + if len(path) == 0 { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, pkg.Location, "invalid package path: %s", pkg.Path)) + return comments, nil + } + path[0] = ast.VarTerm(string(pkg.Path[1].Value.(ast.String))) copy(path[1:], pkg.Path[2:]) w.write("package ") - w.writeRef(path) + _, err = w.writeRef(path, nil) + if err != nil { + return nil, err + } w.blankLine() - return comments + return comments, nil } -func (w *writer) writeComments(comments []*ast.Comment) { - for i := 0; i < len(comments); i++ { - if i > 0 && locCmp(comments[i], comments[i-1]) > 1 { - w.blankLine() +func (w *writer) writeComments(comments []*ast.Comment) error { + for i := range comments { + if i > 0 { + l, err := locCmp(comments[i], comments[i-1]) + if err != nil { + return err + } + if l > 1 { + w.blankLine() + } } + w.writeLine(comments[i].String()) } + + return nil } -func (w *writer) writeRules(rules []*ast.Rule, comments []*ast.Comment) []*ast.Comment { - for _, rule := range rules { - comments = w.insertComments(comments, rule.Location) - comments = w.writeRule(rule, false, comments) +func (w *writer) writeRules(rules []*ast.Rule, comments []*ast.Comment) ([]*ast.Comment, error) { + for i, rule := range rules { + var err error + comments, err = w.insertComments(comments, rule.Location) + if err != nil && !errors.As(err, &unexpectedCommentError{}) { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, err.Error())) + } + + comments, err = w.writeRule(rule, false, comments) + if err != nil && !errors.As(err, &unexpectedCommentError{}) { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, err.Error())) + } + + if i < len(rules)-1 && w.groupableOneLiner(rule) { + next := rules[i+1] + if w.groupableOneLiner(next) && next.Location.Row == rule.Location.Row+1 { + // Current rule and the next are both groupable one-liners, and + // adjacent in the original policy (i.e. no extra newlines between them). + continue + } + } w.blankLine() } - return comments + return comments, nil +} + +var expandedConst = ast.NewBody(ast.NewExpr(ast.InternedBooleanTerm(true))) + +func (w *writer) groupableOneLiner(rule *ast.Rule) bool { + // Location required to determine if two rules are adjacent in the policy. + // If not, we respect line breaks between rules. + if len(rule.Body) > 1 || rule.Default || rule.Location == nil { + return false + } + + partialSetException := w.fmtOpts.contains || rule.Head.Value != nil + + return (w.fmtOpts.regoV1 || w.fmtOpts.ifs) && partialSetException } -func (w *writer) writeRule(rule *ast.Rule, isElse bool, comments []*ast.Comment) []*ast.Comment { +func (w *writer) writeRule(rule *ast.Rule, isElse bool, comments []*ast.Comment) ([]*ast.Comment, error) { if rule == nil { - return comments + return comments, nil } if !isElse { @@ -428,37 +564,67 @@ func (w *writer) writeRule(rule *ast.Rule, isElse bool, comments []*ast.Comment) // `foo = {"a": "b"} { true }` in the AST. We want to preserve that notation // in the formatted code instead of expanding the bodies into rules, so we // pretend that the rule has no body in this case. 
- isExpandedConst := rule.Body.Equal(ast.NewBody(ast.NewExpr(ast.BooleanTerm(true)))) && rule.Else == nil - - comments = w.writeHead(rule.Head, rule.Default, isExpandedConst, comments) + isExpandedConst := rule.Body.Equal(expandedConst) && rule.Else == nil + w.writeCommentOnFinalLine = isExpandedConst - // this excludes partial sets UNLESS `contains` is used - partialSetException := w.fmtOpts.contains || rule.Head.Value != nil + var err error + var unexpectedComment bool + comments, err = w.writeHead(rule.Head, rule.Default, isExpandedConst, comments) + if err != nil { + if errors.As(err, &unexpectedCommentError{}) { + unexpectedComment = true + } else { + return nil, err + } + } if len(rule.Body) == 0 || isExpandedConst { w.endLine() - return comments + return comments, nil } + w.writeCommentOnFinalLine = true + + // this excludes partial sets UNLESS `contains` is used + partialSetException := w.fmtOpts.contains || rule.Head.Value != nil + if (w.fmtOpts.regoV1 || w.fmtOpts.ifs) && partialSetException { w.write(" if") if len(rule.Body) == 1 { if rule.Body[0].Location.Row == rule.Head.Location.Row { w.write(" ") - comments = w.writeExpr(rule.Body[0], comments) + var err error + comments, err = w.writeExpr(rule.Body[0], comments) + if err != nil { + return nil, err + } w.endLine() if rule.Else != nil { - comments = w.writeElse(rule, comments) + comments, err = w.writeElse(rule, comments) + if err != nil { + return nil, err + } } - return comments + return comments, nil } } } - w.write(" {") - w.endLine() + if unexpectedComment && len(comments) > 0 { + w.write(" { ") + } else { + w.write(" {") + w.endLine() + } + w.up() - comments = w.writeBody(rule.Body, comments) + comments, err = w.writeBody(rule.Body, comments) + if err != nil { + // the unexpected comment error is passed up to be handled by writeHead + if !errors.As(err, &unexpectedCommentError{}) { + return nil, err + } + } var closeLoc *ast.Location @@ -470,18 +636,28 @@ func (w *writer) writeRule(rule *ast.Rule, isElse bool, comments []*ast.Comment) closeLoc = closingLoc(0, 0, '{', '}', rule.Location) } - comments = w.insertComments(comments, closeLoc) + comments, err = w.insertComments(comments, closeLoc) + if err != nil { + return nil, err + } - w.down() + if err := w.down(); err != nil { + return nil, err + } w.startLine() w.write("}") if rule.Else != nil { - comments = w.writeElse(rule, comments) + comments, err = w.writeElse(rule, comments) + if err != nil { + return nil, err + } } - return comments + return comments, nil } -func (w *writer) writeElse(rule *ast.Rule, comments []*ast.Comment) []*ast.Comment { +var elseVar ast.Value = ast.Var("else") + +func (w *writer) writeElse(rule *ast.Rule, comments []*ast.Comment) ([]*ast.Comment, error) { // If there was nothing else on the line before the "else" starts // then preserve this style of else block, otherwise it will be // started as an "inline" else eg: @@ -526,9 +702,17 @@ func (w *writer) writeElse(rule *ast.Rule, comments []*ast.Comment) []*ast.Comme } rule.Else.Head.Name = "else" // NOTE(sr): whaaat - rule.Else.Head.Reference = ast.Ref{ast.VarTerm("else")} + + elseHeadReference := ast.NewTerm(elseVar) // construct a reference for the term + elseHeadReference.Location = rule.Else.Head.Location // and set the location to match the rule location + + rule.Else.Head.Reference = ast.Ref{elseHeadReference} rule.Else.Head.Args = nil - comments = w.insertComments(comments, rule.Else.Head.Location) + var err error + comments, err = w.insertComments(comments, rule.Else.Head.Location) 
+ if err != nil { + return nil, err + } if hasCommentAbove && !wasInline { // The comments would have ended the line, be sure to start one again @@ -546,14 +730,27 @@ func (w *writer) writeElse(rule *ast.Rule, comments []*ast.Comment) []*ast.Comme return w.writeRule(rule.Else, true, comments) } -func (w *writer) writeHead(head *ast.Head, isDefault, isExpandedConst bool, comments []*ast.Comment) []*ast.Comment { +func (w *writer) writeHead(head *ast.Head, isDefault bool, isExpandedConst bool, comments []*ast.Comment) ([]*ast.Comment, error) { ref := head.Ref() if head.Key != nil && head.Value == nil && !head.HasDynamicRef() { ref = ref.GroundPrefix() } if w.fmtOpts.refHeads || len(ref) == 1 { - w.writeRef(ref) + var err error + comments, err = w.writeRef(ref, comments) + if err != nil { + return nil, err + } } else { + // if there are comments within the object in the rule head, don't format it + if len(comments) > 0 && ref[1].Location.Row == comments[0].Location.Row { + comments, err := w.writeUnformatted(head.Location, comments) + if err != nil { + return nil, err + } + return comments, nil + } + w.write(ref[0].String()) w.write("[") w.write(ref[1].String()) @@ -566,22 +763,34 @@ func (w *writer) writeHead(head *ast.Head, isDefault, isExpandedConst bool, comm for _, arg := range head.Args { args = append(args, arg) } - comments = w.writeIterable(args, head.Location, closingLoc(0, 0, '(', ')', head.Location), comments, w.listWriter()) + var err error + comments, err = w.writeIterable(args, head.Location, closingLoc(0, 0, '(', ')', head.Location), comments, w.listWriter()) w.write(")") + if err != nil { + return comments, err + } } if head.Key != nil { if w.fmtOpts.contains && head.Value == nil { w.write(" contains ") - comments = w.writeTerm(head.Key, comments) + var err error + comments, err = w.writeTerm(head.Key, comments) + if err != nil { + return comments, err + } } else if head.Value == nil { // no `if` for p[x] notation w.write("[") - comments = w.writeTerm(head.Key, comments) + var err error + comments, err = w.writeTerm(head.Key, comments) + if err != nil { + return comments, err + } w.write("]") } } if head.Value != nil && - (head.Key != nil || ast.Compare(head.Value, ast.BooleanTerm(true)) != 0 || isExpandedConst || isDefault) { + (head.Key != nil || !ast.InternedBooleanTerm(true).Equal(head.Value) || isExpandedConst || isDefault) { // in rego v1, explicitly print value for ref-head constants that aren't partial set assignments, e.g.: // * a -> parser error, won't reach here @@ -592,12 +801,12 @@ func (w *writer) writeHead(head *ast.Head, isDefault, isExpandedConst bool, comm if head.Location == head.Value.Location && head.Name != "else" && - ast.Compare(head.Value, ast.BooleanTerm(true)) == 0 && + ast.InternedBooleanTerm(true).Equal(head.Value) && !isRegoV1RefConst { // If the value location is the same as the location of the head, // we know that the value is generated, i.e. f(1) // Don't print the value (` = true`) as it is implied. 
- return comments + return comments, nil } if head.Assign || w.fmtOpts.regoV1 { @@ -606,24 +815,35 @@ func (w *writer) writeHead(head *ast.Head, isDefault, isExpandedConst bool, comm } else { w.write(" = ") } - comments = w.writeTerm(head.Value, comments) + var err error + comments, err = w.writeTerm(head.Value, comments) + if err != nil { + return comments, err + } } - return comments + return comments, nil } -func (w *writer) insertComments(comments []*ast.Comment, loc *ast.Location) []*ast.Comment { +func (w *writer) insertComments(comments []*ast.Comment, loc *ast.Location) ([]*ast.Comment, error) { before, at, comments := partitionComments(comments, loc) - w.writeComments(before) + + err := w.writeComments(before) + if err != nil { + return nil, err + } if len(before) > 0 && loc.Row-before[len(before)-1].Location.Row > 1 { w.blankLine() } - w.beforeLineEnd(at) - return comments + return comments, w.beforeLineEnd(at) } -func (w *writer) writeBody(body ast.Body, comments []*ast.Comment) []*ast.Comment { - comments = w.insertComments(comments, body.Loc()) +func (w *writer) writeBody(body ast.Body, comments []*ast.Comment) ([]*ast.Comment, error) { + var err error + comments, err = w.insertComments(comments, body.Loc()) + if err != nil { + return comments, err + } for i, expr := range body { // Insert a blank line in before the expression if it was not right // after the previous expression. @@ -640,14 +860,21 @@ func (w *writer) writeBody(body ast.Body, comments []*ast.Comment) []*ast.Commen } w.startLine() - comments = w.writeExpr(expr, comments) + comments, err = w.writeExpr(expr, comments) + if err != nil && !errors.As(err, &unexpectedCommentError{}) { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, err.Error())) + } w.endLine() } - return comments + return comments, nil } -func (w *writer) writeExpr(expr *ast.Expr, comments []*ast.Comment) []*ast.Comment { - comments = w.insertComments(comments, expr.Location) +func (w *writer) writeExpr(expr *ast.Expr, comments []*ast.Comment) ([]*ast.Comment, error) { + var err error + comments, err = w.insertComments(comments, expr.Location) + if err != nil { + return comments, err + } if !w.inline { w.startLine() } @@ -658,37 +885,65 @@ func (w *writer) writeExpr(expr *ast.Expr, comments []*ast.Comment) []*ast.Comme switch t := expr.Terms.(type) { case *ast.SomeDecl: - comments = w.writeSomeDecl(t, comments) + comments, err = w.writeSomeDecl(t, comments) + if err != nil { + return nil, err + } case *ast.Every: - comments = w.writeEvery(t, comments) + comments, err = w.writeEvery(t, comments) + if err != nil { + return nil, err + } case []*ast.Term: - comments = w.writeFunctionCall(expr, comments) + comments, err = w.writeFunctionCall(expr, comments) + if err != nil { + return comments, err + } case *ast.Term: - comments = w.writeTerm(t, comments) + comments, err = w.writeTerm(t, comments) + if err != nil { + return comments, err + } } - var indented bool + var indented, down bool for i, with := range expr.With { if i == 0 || with.Location.Row == expr.With[i-1].Location.Row { // we're on the same line - comments = w.writeWith(with, comments, false) + comments, err = w.writeWith(with, comments, false) + if err != nil { + return nil, err + } } else { // we're on a new line if !indented { indented = true w.up() - defer w.down() + down = true } w.endLine() w.startLine() - comments = w.writeWith(with, comments, true) + comments, err = w.writeWith(with, comments, true) + if err != nil { + return nil, err + } } } - return comments 
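+	// (Editor's note: the 'down' flag replaces the former 'defer w.down()' so
+	// that the error now returned by w.down() can be propagated.)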
+ if down { + if err := w.down(); err != nil { + return nil, err + } + } + + return comments, nil } -func (w *writer) writeSomeDecl(decl *ast.SomeDecl, comments []*ast.Comment) []*ast.Comment { - comments = w.insertComments(comments, decl.Location) +func (w *writer) writeSomeDecl(decl *ast.SomeDecl, comments []*ast.Comment) ([]*ast.Comment, error) { + var err error + comments, err = w.insertComments(comments, decl.Location) + if err != nil { + return nil, err + } w.write("some ") row := decl.Location.Row @@ -705,41 +960,66 @@ func (w *writer) writeSomeDecl(decl *ast.SomeDecl, comments []*ast.Comment) []*a w.write(" ") } - comments = w.writeTerm(term, comments) + comments, err = w.writeTerm(term, comments) + if err != nil { + return nil, err + } if i < len(decl.Symbols)-1 { w.write(",") } case ast.Call: - comments = w.writeInOperator(false, val[1:], comments, decl.Location, ast.BuiltinMap[val[0].String()].Decl) + comments, err = w.writeInOperator(false, val[1:], comments, decl.Location, ast.BuiltinMap[val[0].String()].Decl) + if err != nil { + return nil, err + } } } - return comments + return comments, nil } -func (w *writer) writeEvery(every *ast.Every, comments []*ast.Comment) []*ast.Comment { - comments = w.insertComments(comments, every.Location) +func (w *writer) writeEvery(every *ast.Every, comments []*ast.Comment) ([]*ast.Comment, error) { + var err error + comments, err = w.insertComments(comments, every.Location) + if err != nil { + return nil, err + } w.write("every ") if every.Key != nil { - comments = w.writeTerm(every.Key, comments) + comments, err = w.writeTerm(every.Key, comments) + if err != nil { + return nil, err + } w.write(", ") } - comments = w.writeTerm(every.Value, comments) + comments, err = w.writeTerm(every.Value, comments) + if err != nil { + return nil, err + } w.write(" in ") - comments = w.writeTerm(every.Domain, comments) + comments, err = w.writeTerm(every.Domain, comments) + if err != nil { + return nil, err + } w.write(" {") - comments = w.writeComprehensionBody('{', '}', every.Body, every.Loc(), every.Loc(), comments) + comments, err = w.writeComprehensionBody('{', '}', every.Body, every.Loc(), every.Loc(), comments) + if err != nil { + // the unexpected comment error is passed up to be handled by writeHead + if !errors.As(err, &unexpectedCommentError{}) { + return nil, err + } + } if len(every.Body) == 1 && every.Body[0].Location.Row == every.Location.Row { w.write(" ") } w.write("}") - return comments + return comments, nil } -func (w *writer) writeFunctionCall(expr *ast.Expr, comments []*ast.Comment) []*ast.Comment { +func (w *writer) writeFunctionCall(expr *ast.Expr, comments []*ast.Comment) ([]*ast.Comment, error) { terms := expr.Terms.([]*ast.Term) operator := terms[0].Value.String() @@ -754,22 +1034,34 @@ func (w *writer) writeFunctionCall(expr *ast.Expr, comments []*ast.Comment) []*a return w.writeFunctionCallPlain(terms, comments) } - numDeclArgs := len(bi.Decl.Args()) + numDeclArgs := bi.Decl.Arity() numCallArgs := len(terms) - 1 + var err error switch numCallArgs { case numDeclArgs: // Print infix where result is unassigned (e.g., x != y) - comments = w.writeTerm(terms[1], comments) + comments, err = w.writeTerm(terms[1], comments) + if err != nil { + return nil, err + } w.write(" " + bi.Infix + " ") return w.writeTerm(terms[2], comments) - case numDeclArgs + 1: // Print infix where result is assigned (e.g., z = x + y) - comments = w.writeTerm(terms[3], comments) + comments, err = w.writeTerm(terms[3], comments) + if err != nil { + return nil, 
err + } + w.write(" " + ast.Equality.Infix + " ") + comments, err = w.writeTerm(terms[1], comments) + if err != nil { + return nil, err + } + w.write(" " + bi.Infix + " ") + comments, err = w.writeTerm(terms[2], comments) + if err != nil { + return nil, err + } + return comments, nil } // NOTE(Trolloldem): at this point we are operating with a built-in function with the // wrong arity even when the assignment notation is used @@ -777,7 +1069,7 @@ func (w *writer) writeFunctionCall(expr *ast.Expr, comments []*ast.Comment) []*a return w.writeFunctionCallPlain(terms, comments) } -func (w *writer) writeFunctionCallPlain(terms []*ast.Term, comments []*ast.Comment) []*ast.Comment { +func (w *writer) writeFunctionCallPlain(terms []*ast.Term, comments []*ast.Comment) ([]*ast.Comment, error) { w.write(terms[0].String() + "(") defer w.write(")") args := make([]interface{}, len(terms)-1) @@ -785,57 +1077,167 @@ func (w *writer) writeFunctionCallPlain(terms []*ast.Comme args[i] = t } loc := terms[0].Location - return w.writeIterable(args, loc, closingLoc(0, 0, '(', ')', loc), comments, w.listWriter()) + var err error + comments, err = w.writeIterable(args, loc, closingLoc(0, 0, '(', ')', loc), comments, w.listWriter()) + if err != nil { + return nil, err + } + return comments, nil } -func (w *writer) writeWith(with *ast.With, comments []*ast.Comment, indented bool) []*ast.Comment { - comments = w.insertComments(comments, with.Location) +func (w *writer) writeWith(with *ast.With, comments []*ast.Comment, indented bool) ([]*ast.Comment, error) { + var err error + comments, err = w.insertComments(comments, with.Location) + if err != nil { + return nil, err + } if !indented { w.write(" ") } w.write("with ") - comments = w.writeTerm(with.Target, comments) + comments, err = w.writeTerm(with.Target, comments) + if err != nil { + return nil, err + } w.write(" as ") - return w.writeTerm(with.Value, comments) + comments, err = w.writeTerm(with.Value, comments) + if err != nil { + return nil, err + } + return comments, nil +} + +func (w *writer) writeTerm(term *ast.Term, comments []*ast.Comment) ([]*ast.Comment, error) { + currentComments := make([]*ast.Comment, len(comments)) + copy(currentComments, comments) + + currentLen := w.buf.Len() + + comments, err := w.writeTermParens(false, term, comments) + if err != nil { + if errors.As(err, &unexpectedCommentError{}) { + w.buf.Truncate(currentLen) + + comments, uErr := w.writeUnformatted(term.Location, currentComments) + if uErr != nil { + return nil, uErr + } + return comments, err + } + return nil, err + } + + return comments, nil } -func (w *writer) writeTerm(term *ast.Term, comments []*ast.Comment) []*ast.Comment { - return w.writeTermParens(false, term, comments) +// writeUnformatted writes the unformatted text instead and updates the comment state +func (w *writer) writeUnformatted(location *ast.Location, currentComments []*ast.Comment) ([]*ast.Comment, error) { + if len(location.Text) == 0 { + return nil, errors.New("original unformatted text is empty") + } + + rawRule := string(location.Text) + rowNum := len(strings.Split(rawRule, "\n")) + + w.write(string(location.Text)) + + comments := make([]*ast.Comment, 0, len(currentComments)) + for _, c := range currentComments { + // if there is a body then wait to write the last comment + if w.writeCommentOnFinalLine && c.Location.Row == location.Row+rowNum-1 { + w.write(" " + 
string(c.Location.Text)) + continue + } + + // drop comments that occur within the rule raw text + if c.Location.Row < location.Row+rowNum-1 { + continue + } + comments = append(comments, c) + } + return comments, nil } -func (w *writer) writeTermParens(parens bool, term *ast.Term, comments []*ast.Comment) []*ast.Comment { - comments = w.insertComments(comments, term.Location) +func (w *writer) writeTermParens(parens bool, term *ast.Term, comments []*ast.Comment) ([]*ast.Comment, error) { + var err error + comments, err = w.insertComments(comments, term.Location) + if err != nil { + return nil, err + } if !w.inline { w.startLine() } switch x := term.Value.(type) { case ast.Ref: - w.writeRef(x) + comments, err = w.writeRef(x, comments) + if err != nil { + return nil, err + } case ast.Object: - comments = w.writeObject(x, term.Location, comments) + comments, err = w.writeObject(x, term.Location, comments) + if err != nil { + return nil, err + } case *ast.Array: - comments = w.writeArray(x, term.Location, comments) + comments, err = w.writeArray(x, term.Location, comments) + if err != nil { + return nil, err + } case ast.Set: - comments = w.writeSet(x, term.Location, comments) + comments, err = w.writeSet(x, term.Location, comments) + if err != nil { + return nil, err + } case *ast.ArrayComprehension: - comments = w.writeArrayComprehension(x, term.Location, comments) + comments, err = w.writeArrayComprehension(x, term.Location, comments) + if err != nil { + return nil, err + } case *ast.ObjectComprehension: - comments = w.writeObjectComprehension(x, term.Location, comments) + comments, err = w.writeObjectComprehension(x, term.Location, comments) + if err != nil { + return nil, err + } case *ast.SetComprehension: - comments = w.writeSetComprehension(x, term.Location, comments) + comments, err = w.writeSetComprehension(x, term.Location, comments) + if err != nil { + return nil, err + } case ast.String: if term.Location.Text[0] == '`' { // To preserve raw strings, we need to output the original text, - // not what x.String() would give us. w.write(string(term.Location.Text)) } else { - w.write(x.String()) + // x.String() cannot be used by default because it can change the input string "\u0000" to "\x00" + var after, quote string + var found bool + // term.Location.Text could contain the prefix `else :=`, remove it + switch term.Location.Text[len(term.Location.Text)-1] { + case '"': + quote = "\"" + _, after, found = strings.Cut(string(term.Location.Text), quote) + case '`': + quote = "`" + _, after, found = strings.Cut(string(term.Location.Text), quote) + } + + if !found { + // If no quoted string was found, that means it is a key being formatted to a string + // e.g. 
partial_set.y to partial_set["y"] + w.write(x.String()) + } else { + w.write(quote + after) + } + } case ast.Var: w.write(w.formatVar(x)) case ast.Call: - comments = w.writeCall(parens, x, term.Location, comments) + comments, err = w.writeCall(parens, x, term.Location, comments) + if err != nil { + return nil, err + } case fmt.Stringer: w.write(x.String()) } @@ -843,17 +1245,21 @@ func (w *writer) writeTermParens(parens bool, term *ast.Term, comments []*ast.Co if !w.inline { w.startLine() } - return comments + return comments, nil } -func (w *writer) writeRef(x ast.Ref) { +func (w *writer) writeRef(x ast.Ref, comments []*ast.Comment) ([]*ast.Comment, error) { if len(x) > 0 { parens := false _, ok := x[0].Value.(ast.Call) if ok { parens = x[0].Location.Text[0] == 40 // Starts with "(" } - w.writeTermParens(parens, x[0], nil) + var err error + comments, err = w.writeTermParens(parens, x[0], comments) + if err != nil { + return nil, err + } path := x[1:] for _, t := range path { switch p := t.Value.(type) { @@ -863,11 +1269,21 @@ func (w *writer) writeRef(x ast.Ref) { w.writeBracketed(w.formatVar(p)) default: w.write("[") - w.writeTerm(t, nil) + comments, err = w.writeTerm(t, comments) + if err != nil { + if errors.As(err, &unexpectedCommentError{}) { + // add a new line so that the closing bracket isn't part of the unexpected comment + w.write("\n") + } else { + return nil, err + } + } w.write("]") } } } + + return comments, nil } func (w *writer) writeBracketed(str string) { @@ -885,14 +1301,14 @@ func (w *writer) writeRefStringPath(s ast.String) { } } -func (w *writer) formatVar(v ast.Var) string { +func (*writer) formatVar(v ast.Var) string { if v.IsWildcard() { return ast.Wildcard.String() } return v.String() } -func (w *writer) writeCall(parens bool, x ast.Call, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { +func (w *writer) writeCall(parens bool, x ast.Call, loc *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) { bi, ok := ast.BuiltinMap[x[0].String()] if !ok || bi.Infix == "" { return w.writeFunctionCallPlain(x, comments) @@ -912,57 +1328,80 @@ func (w *writer) writeCall(parens bool, x ast.Call, loc *ast.Location, comments // NOTE(Trolloldem): writeCall is only invoked when the function call is a term // of another function. 
The only valid arity is the one of the // built-in function - if len(bi.Decl.Args()) != len(x)-1 { + if bi.Decl.Arity() != len(x)-1 { w.errs = append(w.errs, ArityFormatMismatchError(x[1:], x[0].String(), loc, bi.Decl)) - return comments + return comments, nil } - comments = w.writeTermParens(true, x[1], comments) + var err error + comments, err = w.writeTermParens(true, x[1], comments) + if err != nil { + return nil, err + } w.write(" " + bi.Infix + " ") - comments = w.writeTermParens(true, x[2], comments) + comments, err = w.writeTermParens(true, x[2], comments) + if err != nil { + return nil, err + } if parens { w.write(")") } - return comments + return comments, nil } -func (w *writer) writeInOperator(parens bool, operands []*ast.Term, comments []*ast.Comment, loc *ast.Location, f *types.Function) []*ast.Comment { +func (w *writer) writeInOperator(parens bool, operands []*ast.Term, comments []*ast.Comment, loc *ast.Location, f *types.Function) ([]*ast.Comment, error) { - if len(operands) != len(f.Args()) { + if len(operands) != f.Arity() { // The number of operands does not match the arity of the `in` operator operator := ast.Member.Name - if len(f.Args()) == 3 { + if f.Arity() == 3 { operator = ast.MemberWithKey.Name } w.errs = append(w.errs, ArityFormatMismatchError(operands, operator, loc, f)) - return comments + return comments, nil } kw := "in" + var err error switch len(operands) { case 2: - comments = w.writeTermParens(true, operands[0], comments) + comments, err = w.writeTermParens(true, operands[0], comments) + if err != nil { + return nil, err + } w.write(" ") w.write(kw) w.write(" ") - comments = w.writeTermParens(true, operands[1], comments) + comments, err = w.writeTermParens(true, operands[1], comments) + if err != nil { + return nil, err + } case 3: if parens { w.write("(") defer w.write(")") } - comments = w.writeTermParens(true, operands[0], comments) + comments, err = w.writeTermParens(true, operands[0], comments) + if err != nil { + return nil, err + } w.write(", ") - comments = w.writeTermParens(true, operands[1], comments) + comments, err = w.writeTermParens(true, operands[1], comments) + if err != nil { + return nil, err + } w.write(" ") w.write(kw) w.write(" ") - comments = w.writeTermParens(true, operands[2], comments) + comments, err = w.writeTermParens(true, operands[2], comments) + if err != nil { + return nil, err + } } - return comments + return comments, nil } -func (w *writer) writeObject(obj ast.Object, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { +func (w *writer) writeObject(obj ast.Object, loc *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) { w.write("{") defer w.write("}") @@ -973,7 +1412,7 @@ func (w *writer) writeObject(obj ast.Object, loc *ast.Location, comments []*ast.
return w.writeIterable(s, loc, closingLoc(0, 0, '{', '}', loc), comments, w.objectWriter()) } -func (w *writer) writeArray(arr *ast.Array, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { +func (w *writer) writeArray(arr *ast.Array, loc *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) { w.write("[") defer w.write("]") @@ -981,14 +1420,24 @@ func (w *writer) writeArray(arr *ast.Array, loc *ast.Location, comments []*ast.C arr.Foreach(func(t *ast.Term) { s = append(s, t) }) - return w.writeIterable(s, loc, closingLoc(0, 0, '[', ']', loc), comments, w.listWriter()) + var err error + comments, err = w.writeIterable(s, loc, closingLoc(0, 0, '[', ']', loc), comments, w.listWriter()) + if err != nil { + return nil, err + } + return comments, nil } -func (w *writer) writeSet(set ast.Set, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { +func (w *writer) writeSet(set ast.Set, loc *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) { if set.Len() == 0 { w.write("set()") - return w.insertComments(comments, closingLoc(0, 0, '(', ')', loc)) + var err error + comments, err = w.insertComments(comments, closingLoc(0, 0, '(', ')', loc)) + if err != nil { + return nil, err + } + return comments, nil } w.write("{") @@ -998,24 +1447,29 @@ func (w *writer) writeSet(set ast.Set, loc *ast.Location, comments []*ast.Commen set.Foreach(func(t *ast.Term) { s = append(s, t) }) - return w.writeIterable(s, loc, closingLoc(0, 0, '{', '}', loc), comments, w.listWriter()) + var err error + comments, err = w.writeIterable(s, loc, closingLoc(0, 0, '{', '}', loc), comments, w.listWriter()) + if err != nil { + return nil, err + } + return comments, nil } -func (w *writer) writeArrayComprehension(arr *ast.ArrayComprehension, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { +func (w *writer) writeArrayComprehension(arr *ast.ArrayComprehension, loc *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) { w.write("[") defer w.write("]") return w.writeComprehension('[', ']', arr.Term, arr.Body, loc, comments) } -func (w *writer) writeSetComprehension(set *ast.SetComprehension, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { +func (w *writer) writeSetComprehension(set *ast.SetComprehension, loc *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) { w.write("{") defer w.write("}") return w.writeComprehension('{', '}', set.Term, set.Body, loc, comments) } -func (w *writer) writeObjectComprehension(object *ast.ObjectComprehension, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { +func (w *writer) writeObjectComprehension(object *ast.ObjectComprehension, loc *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) { w.write("{") defer w.write("}") @@ -1025,12 +1479,16 @@ func (w *writer) writeObjectComprehension(object *ast.ObjectComprehension, loc * w.startLine() } - comments = w.writeTerm(object.Key, comments) + var err error + comments, err = w.writeTerm(object.Key, comments) + if err != nil { + return nil, err + } w.write(": ") return w.writeComprehension('{', '}', object.Value, object.Body, loc, comments) } -func (w *writer) writeComprehension(open, close byte, term *ast.Term, body ast.Body, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { +func (w *writer) writeComprehension(openChar, closeChar byte, term *ast.Term, body ast.Body, loc *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) { if term.Location.Row-loc.Row >= 1 { w.endLine() w.startLine() @@ -1041,55 +1499,82 @@ func (w *writer) 
writeComprehension(open, close byte, term *ast.Term, body ast.B if ok { parens = term.Location.Text[0] == 40 // Starts with "(" } - comments = w.writeTermParens(parens, term, comments) + var err error + comments, err = w.writeTermParens(parens, term, comments) + if err != nil { + return nil, err + } w.write(" |") - return w.writeComprehensionBody(open, close, body, term.Location, loc, comments) + return w.writeComprehensionBody(openChar, closeChar, body, term.Location, loc, comments) } -func (w *writer) writeComprehensionBody(open, close byte, body ast.Body, term, compr *ast.Location, comments []*ast.Comment) []*ast.Comment { +func (w *writer) writeComprehensionBody(openChar, closeChar byte, body ast.Body, term, compr *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) { exprs := make([]interface{}, 0, len(body)) for _, expr := range body { exprs = append(exprs, expr) } - lines := groupIterable(exprs, term) + lines, err := w.groupIterable(exprs, term) + if err != nil { + return nil, err + } if body.Loc().Row-term.Row > 0 || len(lines) > 1 { w.endLine() w.up() defer w.startLine() - defer w.down() + defer func() { + if err := w.down(); err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, err.Error())) + } + }() - comments = w.writeBody(body, comments) + var err error + comments, err = w.writeBody(body, comments) + if err != nil { + return comments, err + } } else { w.write(" ") i := 0 for ; i < len(body)-1; i++ { - comments = w.writeExpr(body[i], comments) + comments, err = w.writeExpr(body[i], comments) + if err != nil { + return comments, err + } w.write("; ") } - comments = w.writeExpr(body[i], comments) + comments, err = w.writeExpr(body[i], comments) + if err != nil { + return comments, err + } } - - return w.insertComments(comments, closingLoc(0, 0, open, close, compr)) + comments, err = w.insertComments(comments, closingLoc(0, 0, openChar, closeChar, compr)) + if err != nil { + return nil, err + } + return comments, nil } -func (w *writer) writeImports(imports []*ast.Import, comments []*ast.Comment) []*ast.Comment { +func (w *writer) writeImports(imports []*ast.Import, comments []*ast.Comment) ([]*ast.Comment, error) { m, comments := mapImportsToComments(imports, comments) groups := groupImports(imports) for _, group := range groups { - comments = w.insertComments(comments, group[0].Loc()) + var err error + comments, err = w.insertComments(comments, group[0].Loc()) + if err != nil { + return nil, err + } // Sort imports within a newline grouping. 
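	// (Editor's note: the method expression (*ast.Import).Compare has type
	// func(*ast.Import, *ast.Import) int, so it can be passed directly to
	// slices.SortFunc below.)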
- sort.Slice(group, func(i, j int) bool { - a := group[i] - b := group[j] - return a.Compare(b) < 0 - }) + slices.SortFunc(group, (*ast.Import).Compare) for _, i := range group { w.startLine() - w.writeImport(i) + err = w.writeImport(i) + if err != nil { + return nil, err + } if c, ok := m[i]; ok { w.write(" " + c.String()) } @@ -1098,10 +1583,10 @@ func (w *writer) writeImports(imports []*ast.Import, comments []*ast.Comment) [] w.blankLine() } - return comments + return comments, nil } -func (w *writer) writeImport(imp *ast.Import) { +func (w *writer) writeImport(imp *ast.Import) error { path := imp.Path.Value.(ast.Ref) buf := []string{"import"} @@ -1111,7 +1596,10 @@ func (w *writer) writeImport(imp *ast.Import) { w2 := writer{ buf: bytes.Buffer{}, } - w2.writeRef(path) + _, err := w2.writeRef(path, nil) + if err != nil { + return err + } buf = append(buf, w2.buf.String()) } else { buf = append(buf, path.String()) @@ -1121,12 +1609,17 @@ func (w *writer) writeImport(imp *ast.Import) { buf = append(buf, "as "+imp.Alias.String()) } w.write(strings.Join(buf, " ")) + + return nil } -type entryWriter func(interface{}, []*ast.Comment) []*ast.Comment +type entryWriter func(interface{}, []*ast.Comment) ([]*ast.Comment, error) -func (w *writer) writeIterable(elements []interface{}, last *ast.Location, close *ast.Location, comments []*ast.Comment, fn entryWriter) []*ast.Comment { - lines := groupIterable(elements, last) +func (w *writer) writeIterable(elements []interface{}, last *ast.Location, close *ast.Location, comments []*ast.Comment, fn entryWriter) ([]*ast.Comment, error) { + lines, err := w.groupIterable(elements, last) + if err != nil { + return nil, err + } if len(lines) > 1 { w.delayBeforeEnd() w.startMultilineSeq() @@ -1134,34 +1627,49 @@ func (w *writer) writeIterable(elements []interface{}, last *ast.Location, close i := 0 for ; i < len(lines)-1; i++ { - comments = w.writeIterableLine(lines[i], comments, fn) + comments, err = w.writeIterableLine(lines[i], comments, fn) + if err != nil { + return nil, err + } w.write(",") w.endLine() w.startLine() } - comments = w.writeIterableLine(lines[i], comments, fn) + comments, err = w.writeIterableLine(lines[i], comments, fn) + if err != nil { + return nil, err + } if len(lines) > 1 { w.write(",") w.endLine() - comments = w.insertComments(comments, close) - w.down() + comments, err = w.insertComments(comments, close) + if err != nil { + return nil, err + } + if err := w.down(); err != nil { + return nil, err + } w.startLine() } - return comments + return comments, nil } -func (w *writer) writeIterableLine(elements []interface{}, comments []*ast.Comment, fn entryWriter) []*ast.Comment { +func (w *writer) writeIterableLine(elements []interface{}, comments []*ast.Comment, fn entryWriter) ([]*ast.Comment, error) { if len(elements) == 0 { - return comments + return comments, nil } i := 0 for ; i < len(elements)-1; i++ { - comments = fn(elements[i], comments) + var err error + comments, err = fn(elements[i], comments) + if err != nil { + return nil, err + } w.write(", ") } @@ -1169,7 +1677,7 @@ func (w *writer) writeIterableLine(elements []interface{}, comments []*ast.Comme } func (w *writer) objectWriter() entryWriter { - return func(x interface{}, comments []*ast.Comment) []*ast.Comment { + return func(x interface{}, comments []*ast.Comment) ([]*ast.Comment, error) { entry := x.([2]*ast.Term) call, isCall := entry[0].Value.(ast.Call) @@ -1180,7 +1688,11 @@ func (w *writer) objectWriter() entryWriter { w.write("(") } - comments = 
w.writeTerm(entry[0], comments) + var err error + comments, err = w.writeTerm(entry[0], comments) + if err != nil { + return nil, err + } if paren { w.write(")") } @@ -1198,7 +1710,7 @@ func (w *writer) objectWriter() entryWriter { } func (w *writer) listWriter() entryWriter { - return func(x interface{}, comments []*ast.Comment) []*ast.Comment { + return func(x interface{}, comments []*ast.Comment) ([]*ast.Comment, error) { t, ok := x.(*ast.Term) if ok { call, isCall := t.Value.(ast.Call) @@ -1214,7 +1726,7 @@ func (w *writer) listWriter() entryWriter { // groupIterable will group the `elements` slice into slices according to their // location: anything on the same line will be put into a slice. -func groupIterable(elements []interface{}, last *ast.Location) [][]interface{} { +func (w *writer) groupIterable(elements []interface{}, last *ast.Location) ([][]interface{}, error) { // Generated vars occur in the AST when we're rendering the result of // partial evaluation in a bundle build with optimization. // Those variables, and wildcard variables have the "default location", @@ -1241,18 +1753,26 @@ func groupIterable(elements []interface{}, last *ast.Location) [][]interface{} { return false }) if def { // return as-is - return [][]interface{}{elements} + return [][]interface{}{elements}, nil } } - sort.Slice(elements, func(i, j int) bool { - return locLess(elements[i], elements[j]) + + slices.SortFunc(elements, func(i, j any) int { + l, err := locCmp(i, j) + if err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, err.Error())) + } + return l }) var lines [][]interface{} cur := make([]interface{}, 0, len(elements)) for i, t := range elements { elem := t - loc := getLoc(elem) + loc, err := getLoc(elem) + if err != nil { + return nil, err + } lineDiff := loc.Row - last.Row if lineDiff > 0 && i > 0 { lines = append(lines, cur) @@ -1262,7 +1782,7 @@ func groupIterable(elements []interface{}, last *ast.Location) [][]interface{} { last = loc cur = append(cur, elem) } - return append(lines, cur) + return append(lines, cur), nil } func mapImportsToComments(imports []*ast.Import, comments []*ast.Comment) (map[*ast.Import]*ast.Comment, []*ast.Comment) { @@ -1318,14 +1838,37 @@ func groupImports(imports []*ast.Import) [][]*ast.Import { return groups } -func partitionComments(comments []*ast.Comment, l *ast.Location) (before []*ast.Comment, at *ast.Comment, after []*ast.Comment) { +func partitionComments(comments []*ast.Comment, l *ast.Location) ([]*ast.Comment, *ast.Comment, []*ast.Comment) { + if len(comments) == 0 { + return nil, nil, nil + } + + numBefore, numAfter := 0, 0 + for _, c := range comments { + switch cmp := c.Location.Row - l.Row; { + case cmp < 0: + numBefore++ + case cmp > 0: + numAfter++ + } + } + + if numAfter == len(comments) { + return nil, nil, comments + } + + var at *ast.Comment + + before := make([]*ast.Comment, 0, numBefore) + after := comments[0 : 0 : len(comments)-numBefore] + for _, c := range comments { switch cmp := c.Location.Row - l.Row; { case cmp < 0: before = append(before, c) case cmp > 0: after = append(after, c) - case cmp == 0: + default: at = c } } @@ -1361,43 +1904,52 @@ loop: return rules, others[i:] } -func locLess(a, b interface{}) bool { - return locCmp(a, b) < 0 +func locLess(a, b interface{}) (bool, error) { + c, err := locCmp(a, b) + return c < 0, err } -func locCmp(a, b interface{}) int { - al := getLoc(a) - bl := getLoc(b) +func locCmp(a, b interface{}) (int, error) { + al, err := getLoc(a) + if err != nil { + return 0, 
err + } + bl, err := getLoc(b) + if err != nil { + return 0, err + } switch { case al == nil && bl == nil: - return 0 + return 0, nil case al == nil: - return -1 + return -1, nil case bl == nil: - return 1 + return 1, nil } if cmp := al.Row - bl.Row; cmp != 0 { - return cmp + return cmp, nil } - return al.Col - bl.Col + return al.Col - bl.Col, nil } -func getLoc(x interface{}) *ast.Location { +func getLoc(x interface{}) (*ast.Location, error) { switch x := x.(type) { case ast.Node: // *ast.Head, *ast.Expr, *ast.With, *ast.Term - return x.Loc() + return x.Loc(), nil case *ast.Location: - return x + return x, nil case [2]*ast.Term: // Special case to allow for easy printing of objects. - return x[0].Location + return x[0].Location, nil default: - panic("Not reached") + return nil, fmt.Errorf("unable to get location for type %v", x) } } -func closingLoc(skipOpen, skipClose, open, close byte, loc *ast.Location) *ast.Location { +var negativeRow = &ast.Location{Row: -1} + +func closingLoc(skipOpen, skipClose, openChar, closeChar byte, loc *ast.Location) *ast.Location { i, offset := 0, 0 // Skip past parens/brackets/braces in rule heads. @@ -1406,26 +1958,26 @@ func closingLoc(skipOpen, skipClose, open, close byte, loc *ast.Location) *ast.L } for ; i < len(loc.Text); i++ { - if loc.Text[i] == open { + if loc.Text[i] == openChar { break } } if i >= len(loc.Text) { - return &ast.Location{Row: -1} + return negativeRow } state := 1 for state > 0 { i++ if i >= len(loc.Text) { - return &ast.Location{Row: -1} + return negativeRow } switch loc.Text[i] { - case open: + case openChar: state++ - case close: + case closeChar: state-- case '\n': offset++ @@ -1435,10 +1987,10 @@ func closingLoc(skipOpen, skipClose, open, close byte, loc *ast.Location) *ast.L return &ast.Location{Row: loc.Row + offset} } -func skipPast(open, close byte, loc *ast.Location) (int, int) { +func skipPast(openChar, closeChar byte, loc *ast.Location) (int, int) { i := 0 for ; i < len(loc.Text); i++ { - if loc.Text[i] == open { + if loc.Text[i] == openChar { break } } @@ -1452,9 +2004,9 @@ func skipPast(open, close byte, loc *ast.Location) (int, int) { } switch loc.Text[i] { - case open: + case openChar: state++ - case close: + case closeChar: state-- case '\n': offset++ @@ -1467,7 +2019,7 @@ func skipPast(open, close byte, loc *ast.Location) (int, int) { // startLine begins a line with the current indentation level. func (w *writer) startLine() { w.inline = true - for i := 0; i < w.level; i++ { + for range w.level { w.write(w.indent) } } @@ -1483,15 +2035,46 @@ func (w *writer) endLine() { w.write("\n") } +type unexpectedCommentError struct { + newComment string + newCommentRow int + existingComment string + existingCommentRow int +} + +func (u unexpectedCommentError) Error() string { + return fmt.Sprintf("unexpected new comment (%s) on line %d because there is already a comment (%s) registered for line %d", + u.newComment, u.newCommentRow, u.existingComment, u.existingCommentRow) +} + // beforeLineEnd registers a comment to be printed at the end of the current line. 
-func (w *writer) beforeLineEnd(c *ast.Comment) { +func (w *writer) beforeLineEnd(c *ast.Comment) error { if w.beforeEnd != nil { if c == nil { - return + return nil + } + + existingComment := truncatedString(w.beforeEnd.String(), 100) + existingCommentRow := w.beforeEnd.Location.Row + newComment := truncatedString(c.String(), 100) + w.beforeEnd = nil + + return unexpectedCommentError{ + newComment: newComment, + newCommentRow: c.Location.Row, + existingComment: existingComment, + existingCommentRow: existingCommentRow, } - panic("overwriting non-nil beforeEnd") } w.beforeEnd = c + return nil +} + +func truncatedString(s string, max int) string { + if len(s) > max { + return s[:max-2] + "..." + } + return s } func (w *writer) delayBeforeEnd() { @@ -1533,11 +2116,12 @@ func (w *writer) up() { } // down decreases the indentation level -func (w *writer) down() { +func (w *writer) down() error { if w.level == 0 { - panic("negative indentation level") + return errors.New("negative indentation level") } w.level-- + return nil } func ensureFutureKeywordImport(imps []*ast.Import, kw string) []*ast.Import { @@ -1589,22 +2173,22 @@ func ensureImport(imps []*ast.Import, path ast.Ref) []*ast.Import { return append(imps, imp) } -// ArgErrDetail but for `fmt` checks since compiler has not run yet. +// ArityFormatErrDetail but for `fmt` checks since compiler has not run yet. type ArityFormatErrDetail struct { Have []string `json:"have"` Want []string `json:"want"` } -// arityMismatchError but for `fmt` checks since the compiler has not run yet. +// ArityFormatMismatchError but for `fmt` checks since the compiler has not run yet. func ArityFormatMismatchError(operands []*ast.Term, operator string, loc *ast.Location, f *types.Function) *ast.Error { - want := make([]string, len(f.Args())) - for i := range f.Args() { - want[i] = types.Sprint(f.Args()[i]) + want := make([]string, f.Arity()) + for i, arg := range f.FuncArgs().Args { + want[i] = types.Sprint(arg) } have := make([]string, len(operands)) - for i := 0; i < len(operands); i++ { - have[i] = ast.TypeName(operands[i].Value) + for i := range operands { + have[i] = ast.ValueName(operands[i].Value) } err := ast.NewError(ast.TypeErr, loc, "%s: %s", operator, "arity mismatch") err.Details = &ArityFormatErrDetail{ @@ -1617,8 +2201,8 @@ func ArityFormatMismatchError(operands []*ast.Term, operator string, loc *ast.Lo // Lines returns the string representation of the detail. 
func (d *ArityFormatErrDetail) Lines() []string { return []string{ - "have: " + "(" + strings.Join(d.Have, ",") + ")", - "want: " + "(" + strings.Join(d.Want, ",") + ")", + "have: (" + strings.Join(d.Have, ",") + ")", + "want: (" + strings.Join(d.Want, ",") + ")", } } @@ -1631,10 +2215,12 @@ func moduleIsRegoV1Compatible(m *ast.Module) bool { return false } +var v1StringTerm = ast.StringTerm("v1") + // isRegoV1Compatible returns true if the passed *ast.Import is `rego.v1` func isRegoV1Compatible(imp *ast.Import) bool { path := imp.Path.Value.(ast.Ref) return len(path) == 2 && ast.RegoRootDocument.Equal(path[0]) && - path[1].Equal(ast.StringTerm("v1")) + path[1].Equal(v1StringTerm) } diff --git a/vendor/github.com/open-policy-agent/opa/hooks/hooks.go b/vendor/github.com/open-policy-agent/opa/v1/hooks/hooks.go similarity index 98% rename from vendor/github.com/open-policy-agent/opa/hooks/hooks.go rename to vendor/github.com/open-policy-agent/opa/v1/hooks/hooks.go index 9659d7b4997..caf69b12428 100644 --- a/vendor/github.com/open-policy-agent/opa/hooks/hooks.go +++ b/vendor/github.com/open-policy-agent/opa/v1/hooks/hooks.go @@ -8,7 +8,7 @@ import ( "context" "fmt" - "github.com/open-policy-agent/opa/config" + "github.com/open-policy-agent/opa/v1/config" ) // Hook is a hook to be called in some select places in OPA's operation. diff --git a/vendor/github.com/open-policy-agent/opa/ir/ir.go b/vendor/github.com/open-policy-agent/opa/v1/ir/ir.go similarity index 99% rename from vendor/github.com/open-policy-agent/opa/ir/ir.go rename to vendor/github.com/open-policy-agent/opa/v1/ir/ir.go index c07670704e4..3657a9b673e 100644 --- a/vendor/github.com/open-policy-agent/opa/ir/ir.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ir/ir.go @@ -11,7 +11,7 @@ package ir import ( "fmt" - "github.com/open-policy-agent/opa/types" + "github.com/open-policy-agent/opa/v1/types" ) type ( @@ -106,7 +106,7 @@ const ( Unused ) -func (a *Policy) String() string { +func (*Policy) String() string { return "Policy" } diff --git a/vendor/github.com/open-policy-agent/opa/ir/marshal.go b/vendor/github.com/open-policy-agent/opa/v1/ir/marshal.go similarity index 93% rename from vendor/github.com/open-policy-agent/opa/ir/marshal.go rename to vendor/github.com/open-policy-agent/opa/v1/ir/marshal.go index 69f4b5caf69..f792e2c1b69 100644 --- a/vendor/github.com/open-policy-agent/opa/ir/marshal.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ir/marshal.go @@ -6,6 +6,7 @@ package ir import ( "encoding/json" + "fmt" "reflect" ) @@ -50,7 +51,11 @@ func (a *Operand) UnmarshalJSON(bs []byte) error { if err := json.Unmarshal(bs, &typed); err != nil { return err } - x := valFactories[typed.Type]() + f, ok := valFactories[typed.Type] + if !ok { + return fmt.Errorf("unrecognized value type %q", typed.Type) + } + x := f() if err := json.Unmarshal(typed.Value, &x); err != nil { return err } @@ -77,7 +82,11 @@ type rawTypedStmt struct { } func (raw rawTypedStmt) Unmarshal() (Stmt, error) { - x := stmtFactories[raw.Type]() + f, ok := stmtFactories[raw.Type] + if !ok { + return nil, fmt.Errorf("unrecognized statement type %q", raw.Type) + } + x := f() if err := json.Unmarshal(raw.Stmt, &x); err != nil { return nil, err } @@ -119,6 +128,7 @@ var stmtFactories = map[string]func() Stmt{ "IsArrayStmt": func() Stmt { return &IsArrayStmt{} }, "IsObjectStmt": func() Stmt { return &IsObjectStmt{} }, "IsDefinedStmt": func() Stmt { return &IsDefinedStmt{} }, + "IsSetStmt": func() Stmt { return &IsSetStmt{} }, "IsUndefinedStmt": func() Stmt { 
return &IsUndefinedStmt{} }, "ArrayAppendStmt": func() Stmt { return &ArrayAppendStmt{} }, "ObjectInsertStmt": func() Stmt { return &ObjectInsertStmt{} }, diff --git a/vendor/github.com/open-policy-agent/opa/ir/pretty.go b/vendor/github.com/open-policy-agent/opa/v1/ir/pretty.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/ir/pretty.go rename to vendor/github.com/open-policy-agent/opa/v1/ir/pretty.go diff --git a/vendor/github.com/open-policy-agent/opa/ir/walk.go b/vendor/github.com/open-policy-agent/opa/v1/ir/walk.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/ir/walk.go rename to vendor/github.com/open-policy-agent/opa/v1/ir/walk.go diff --git a/vendor/github.com/open-policy-agent/opa/keys/keys.go b/vendor/github.com/open-policy-agent/opa/v1/keys/keys.go similarity index 98% rename from vendor/github.com/open-policy-agent/opa/keys/keys.go rename to vendor/github.com/open-policy-agent/opa/v1/keys/keys.go index de034969438..fba7a9c9398 100644 --- a/vendor/github.com/open-policy-agent/opa/keys/keys.go +++ b/vendor/github.com/open-policy-agent/opa/v1/keys/keys.go @@ -5,7 +5,7 @@ import ( "fmt" "os" - "github.com/open-policy-agent/opa/util" + "github.com/open-policy-agent/opa/v1/util" ) const defaultSigningAlgorithm = "RS256" diff --git a/vendor/github.com/open-policy-agent/opa/v1/loader/errors.go b/vendor/github.com/open-policy-agent/opa/v1/loader/errors.go new file mode 100644 index 00000000000..55b8e7dc446 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/loader/errors.go @@ -0,0 +1,62 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package loader + +import ( + "fmt" + "strings" + + "github.com/open-policy-agent/opa/v1/ast" +) + +// Errors is a wrapper for multiple loader errors. 
+type Errors []error + +func (e Errors) Error() string { + if len(e) == 0 { + return "no error(s)" + } + if len(e) == 1 { + return "1 error occurred during loading: " + e[0].Error() + } + buf := make([]string, len(e)) + for i := range buf { + buf[i] = e[i].Error() + } + return fmt.Sprintf("%v errors occurred during loading:\n", len(e)) + strings.Join(buf, "\n") +} + +func (e *Errors) add(err error) { + if errs, ok := err.(ast.Errors); ok { + for i := range errs { + *e = append(*e, errs[i]) + } + } else { + *e = append(*e, err) + } +} + +type unsupportedDocumentType string + +func (path unsupportedDocumentType) Error() string { + return string(path) + ": document must be of type object" +} + +type unrecognizedFile string + +func (path unrecognizedFile) Error() string { + return string(path) + ": can't recognize file type" +} + +func isUnrecognizedFile(err error) bool { + _, ok := err.(unrecognizedFile) + return ok +} + +type mergeError string + +func (e mergeError) Error() string { + return string(e) + ": merge error" +} diff --git a/vendor/github.com/open-policy-agent/opa/loader/extension/extension.go b/vendor/github.com/open-policy-agent/opa/v1/loader/extension/extension.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/loader/extension/extension.go rename to vendor/github.com/open-policy-agent/opa/v1/loader/extension/extension.go diff --git a/vendor/github.com/open-policy-agent/opa/loader/filter/filter.go b/vendor/github.com/open-policy-agent/opa/v1/loader/filter/filter.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/loader/filter/filter.go rename to vendor/github.com/open-policy-agent/opa/v1/loader/filter/filter.go diff --git a/vendor/github.com/open-policy-agent/opa/v1/loader/loader.go b/vendor/github.com/open-policy-agent/opa/v1/loader/loader.go new file mode 100644 index 00000000000..5e2217473ae --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/loader/loader.go @@ -0,0 +1,834 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package loader contains utilities for loading files into OPA. +package loader + +import ( + "bytes" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "strings" + + "sigs.k8s.io/yaml" + + fileurl "github.com/open-policy-agent/opa/internal/file/url" + "github.com/open-policy-agent/opa/internal/merge" + "github.com/open-policy-agent/opa/v1/ast" + astJSON "github.com/open-policy-agent/opa/v1/ast/json" + "github.com/open-policy-agent/opa/v1/bundle" + "github.com/open-policy-agent/opa/v1/loader/filter" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/storage/inmem" + "github.com/open-policy-agent/opa/v1/util" +) + +// Result represents the result of successfully loading zero or more files. +type Result struct { + Documents map[string]interface{} + Modules map[string]*RegoFile + path []string +} + +// ParsedModules returns the parsed modules stored on the result. +func (l *Result) ParsedModules() map[string]*ast.Module { + modules := make(map[string]*ast.Module) + for _, module := range l.Modules { + modules[module.Name] = module.Parsed + } + return modules +} + +// Compiler returns a Compiler object with the compiled modules from this loader +// result. 
+func (l *Result) Compiler() (*ast.Compiler, error) { + compiler := ast.NewCompiler() + compiler.Compile(l.ParsedModules()) + if compiler.Failed() { + return nil, compiler.Errors + } + return compiler, nil +} + +// Store returns a Store object with the documents from this loader result. +func (l *Result) Store() (storage.Store, error) { + return l.StoreWithOpts() +} + +// StoreWithOpts returns a Store object with the documents from this loader result, +// instantiated with the passed options. +func (l *Result) StoreWithOpts(opts ...inmem.Opt) (storage.Store, error) { + return inmem.NewFromObjectWithOpts(l.Documents, opts...), nil +} + +// RegoFile represents the result of loading a single Rego source file. +type RegoFile struct { + Name string + Parsed *ast.Module + Raw []byte +} + +// Filter defines the interface for filtering files during loading. If the +// filter returns true, the file should be excluded from the result. +type Filter = filter.LoaderFilter + +// GlobExcludeName excludes files and directories whose names do not match the +// shell style pattern at minDepth or greater. +func GlobExcludeName(pattern string, minDepth int) Filter { + return func(_ string, info fs.FileInfo, depth int) bool { + match, _ := filepath.Match(pattern, info.Name()) + return match && depth >= minDepth + } +} + +// FileLoader defines an interface for loading OPA data files +// and Rego policies. +type FileLoader interface { + All(paths []string) (*Result, error) + Filtered(paths []string, filter Filter) (*Result, error) + AsBundle(path string) (*bundle.Bundle, error) + WithReader(io.Reader) FileLoader + WithFS(fs.FS) FileLoader + WithMetrics(metrics.Metrics) FileLoader + WithFilter(Filter) FileLoader + WithBundleVerificationConfig(*bundle.VerificationConfig) FileLoader + WithSkipBundleVerification(bool) FileLoader + WithProcessAnnotation(bool) FileLoader + WithCapabilities(*ast.Capabilities) FileLoader + // Deprecated: Use SetOptions in the json package instead, where a longer description + // of why this is deprecated also can be found. + WithJSONOptions(*astJSON.Options) FileLoader + WithRegoVersion(ast.RegoVersion) FileLoader + WithFollowSymlinks(bool) FileLoader +} + +// NewFileLoader returns a new FileLoader instance. +func NewFileLoader() FileLoader { + return &fileLoader{ + metrics: metrics.New(), + files: make(map[string]bundle.FileInfo), + } +} + +type fileLoader struct { + metrics metrics.Metrics + filter Filter + bvc *bundle.VerificationConfig + skipVerify bool + files map[string]bundle.FileInfo + opts ast.ParserOptions + fsys fs.FS + reader io.Reader + followSymlinks bool +} + +// WithFS provides an fs.FS to use for loading files. You can pass nil to +// use plain IO calls (e.g. os.Open, os.Stat, etc.), this is the default +// behaviour. +func (fl *fileLoader) WithFS(fsys fs.FS) FileLoader { + fl.fsys = fsys + return fl +} + +// WithReader provides an io.Reader to use for loading the bundle tarball. +// An io.Reader passed via WithReader takes precedence over an fs.FS passed +// via WithFS. 
+func (fl *fileLoader) WithReader(rdr io.Reader) FileLoader { + fl.reader = rdr + return fl +} + +// WithMetrics provides the metrics instance to use while loading +func (fl *fileLoader) WithMetrics(m metrics.Metrics) FileLoader { + fl.metrics = m + return fl +} + +// WithFilter specifies the filter object to use to filter files while loading +func (fl *fileLoader) WithFilter(filter Filter) FileLoader { + fl.filter = filter + return fl +} + +// WithBundleVerificationConfig sets the key configuration used to verify a signed bundle +func (fl *fileLoader) WithBundleVerificationConfig(config *bundle.VerificationConfig) FileLoader { + fl.bvc = config + return fl +} + +// WithSkipBundleVerification skips verification of a signed bundle +func (fl *fileLoader) WithSkipBundleVerification(skipVerify bool) FileLoader { + fl.skipVerify = skipVerify + return fl +} + +// WithProcessAnnotation enables or disables processing of schema annotations on rules +func (fl *fileLoader) WithProcessAnnotation(processAnnotation bool) FileLoader { + fl.opts.ProcessAnnotation = processAnnotation + return fl +} + +// WithCapabilities sets the supported capabilities when loading the files +func (fl *fileLoader) WithCapabilities(caps *ast.Capabilities) FileLoader { + fl.opts.Capabilities = caps + return fl +} + +// WithJSONOptions sets the JSON options on the parser (now a no-op). +// +// Deprecated: Use SetOptions in the json package instead, where a longer description +// of why this is deprecated also can be found. +func (fl *fileLoader) WithJSONOptions(*astJSON.Options) FileLoader { + return fl +} + +// WithRegoVersion sets the ast.RegoVersion to use when parsing and compiling modules. +func (fl *fileLoader) WithRegoVersion(version ast.RegoVersion) FileLoader { + fl.opts.RegoVersion = version + return fl +} + +// WithFollowSymlinks enables or disables following symlinks when loading files +func (fl *fileLoader) WithFollowSymlinks(followSymlinks bool) FileLoader { + fl.followSymlinks = followSymlinks + return fl +} + +// All returns a Result object loaded (recursively) from the specified paths. +func (fl fileLoader) All(paths []string) (*Result, error) { + return fl.Filtered(paths, nil) +} + +// Filtered returns a Result object loaded (recursively) from the specified +// paths while applying the given filters. If any filter returns true, the +// file/directory is excluded. +func (fl fileLoader) Filtered(paths []string, filter Filter) (*Result, error) { + return all(fl.fsys, paths, filter, func(curr *Result, path string, depth int) error { + + var ( + bs []byte + err error + ) + if fl.fsys != nil { + bs, err = fs.ReadFile(fl.fsys, path) + } else { + bs, err = os.ReadFile(path) + } + if err != nil { + return err + } + + result, err := loadKnownTypes(path, bs, fl.metrics, fl.opts) + if err != nil { + if !isUnrecognizedFile(err) { + return err + } + if depth > 0 { + return nil + } + result, err = loadFileForAnyType(path, bs, fl.metrics, fl.opts) + if err != nil { + return err + } + } + + return curr.merge(path, result) + }) +} + +// AsBundle loads a path as a bundle. If it is a single file +// it will be treated as a normal tarball bundle. If a directory +// is supplied it will be loaded as an unzipped bundle tree. 
+func (fl fileLoader) AsBundle(path string) (*bundle.Bundle, error) {
+ path, err := fileurl.Clean(path)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := checkForUNCPath(path); err != nil {
+ return nil, err
+ }
+
+ var bundleLoader bundle.DirectoryLoader
+ var isDir bool
+ if fl.reader != nil {
+ bundleLoader = bundle.NewTarballLoaderWithBaseURL(fl.reader, path).WithFilter(fl.filter)
+ } else {
+ bundleLoader, isDir, err = GetBundleDirectoryLoaderFS(fl.fsys, path, fl.filter)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ bundleLoader = bundleLoader.WithFollowSymlinks(fl.followSymlinks)
+
+ br := bundle.NewCustomReader(bundleLoader).
+ WithMetrics(fl.metrics).
+ WithBundleVerificationConfig(fl.bvc).
+ WithSkipBundleVerification(fl.skipVerify).
+ WithProcessAnnotations(fl.opts.ProcessAnnotation).
+ WithCapabilities(fl.opts.Capabilities).
+ WithFollowSymlinks(fl.followSymlinks).
+ WithRegoVersion(fl.opts.RegoVersion)
+
+ // For bundle directories add the full path in front of module file names
+ // to simplify debugging.
+ if isDir {
+ br.WithBaseDir(path)
+ }
+
+ b, err := br.Read()
+ if err != nil {
+ err = fmt.Errorf("bundle %s: %w", path, err)
+ }
+
+ return &b, err
+}
+
+// GetBundleDirectoryLoader returns a bundle directory loader which can be used to load
+// files in the directory.
+func GetBundleDirectoryLoader(path string) (bundle.DirectoryLoader, bool, error) {
+ return GetBundleDirectoryLoaderFS(nil, path, nil)
+}
+
+// GetBundleDirectoryLoaderWithFilter returns a bundle directory loader which can be used to load
+// files in the directory after applying the given filter.
+func GetBundleDirectoryLoaderWithFilter(path string, filter Filter) (bundle.DirectoryLoader, bool, error) {
+ return GetBundleDirectoryLoaderFS(nil, path, filter)
+}
+
+// GetBundleDirectoryLoaderFS returns a bundle directory loader which can be used to load
+// files in the directory.
+func GetBundleDirectoryLoaderFS(fsys fs.FS, path string, filter Filter) (bundle.DirectoryLoader, bool, error) {
+ path, err := fileurl.Clean(path)
+ if err != nil {
+ return nil, false, err
+ }
+
+ if err := checkForUNCPath(path); err != nil {
+ return nil, false, err
+ }
+
+ var fi fs.FileInfo
+ if fsys != nil {
+ fi, err = fs.Stat(fsys, path)
+ } else {
+ fi, err = os.Stat(path)
+ }
+ if err != nil {
+ return nil, false, fmt.Errorf("error reading %q: %s", path, err)
+ }
+
+ var bundleLoader bundle.DirectoryLoader
+ if fi.IsDir() {
+ if fsys != nil {
+ bundleLoader = bundle.NewFSLoaderWithRoot(fsys, path)
+ } else {
+ bundleLoader = bundle.NewDirectoryLoader(path)
+ }
+ } else {
+ var fh fs.File
+ if fsys != nil {
+ fh, err = fsys.Open(path)
+ } else {
+ fh, err = os.Open(path)
+ }
+ if err != nil {
+ return nil, false, err
+ }
+ bundleLoader = bundle.NewTarballLoaderWithBaseURL(fh, path)
+ }
+
+ if filter != nil {
+ bundleLoader = bundleLoader.WithFilter(filter)
+ }
+ return bundleLoader, fi.IsDir(), nil
+}
+
+// FilteredPaths is the same as FilteredPathsFS using the current directory file
+// system.
+func FilteredPaths(paths []string, filter Filter) ([]string, error) {
+ return FilteredPathsFS(nil, paths, filter)
+}
+
+// FilteredPathsFS returns a list of files from the specified
+// paths while applying the given filters. If any filter returns true, the
+// file/directory is excluded. 
+func FilteredPathsFS(fsys fs.FS, paths []string, filter Filter) ([]string, error) { + result := []string{} + + _, err := all(fsys, paths, filter, func(_ *Result, path string, _ int) error { + result = append(result, path) + return nil + }) + if err != nil { + return nil, err + } + return result, nil +} + +// Schemas loads a schema set from the specified file path. +func Schemas(schemaPath string) (*ast.SchemaSet, error) { + + var errs Errors + ss, err := loadSchemas(schemaPath) + if err != nil { + errs.add(err) + return nil, errs + } + + return ss, nil +} + +func loadSchemas(schemaPath string) (*ast.SchemaSet, error) { + + if schemaPath == "" { + return nil, nil + } + + ss := ast.NewSchemaSet() + path, err := fileurl.Clean(schemaPath) + if err != nil { + return nil, err + } + + info, err := os.Stat(path) + if err != nil { + return nil, err + } + + // Handle single file case. + if !info.IsDir() { + schema, err := loadOneSchema(path) + if err != nil { + return nil, err + } + ss.Put(ast.SchemaRootRef, schema) + return ss, nil + + } + + // Handle directory case. + rootDir := path + + err = filepath.Walk(path, + func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } else if info.IsDir() { + return nil + } + + schema, err := loadOneSchema(path) + if err != nil { + return err + } + + relPath, err := filepath.Rel(rootDir, path) + if err != nil { + return err + } + + key := getSchemaSetByPathKey(relPath) + ss.Put(key, schema) + return nil + }) + + if err != nil { + return nil, err + } + + return ss, nil +} + +func getSchemaSetByPathKey(path string) ast.Ref { + + front := filepath.Dir(path) + last := strings.TrimSuffix(filepath.Base(path), filepath.Ext(path)) + + var parts []string + + if front != "." { + parts = append(strings.Split(filepath.ToSlash(front), "/"), last) + } else { + parts = []string{last} + } + + key := make(ast.Ref, 1+len(parts)) + key[0] = ast.SchemaRootDocument + for i := range parts { + key[i+1] = ast.StringTerm(parts[i]) + } + + return key +} + +func loadOneSchema(path string) (interface{}, error) { + bs, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + var schema interface{} + if err := util.Unmarshal(bs, &schema); err != nil { + return nil, fmt.Errorf("%s: %w", path, err) + } + + return schema, nil +} + +// All returns a Result object loaded (recursively) from the specified paths. +// Deprecated: Use FileLoader.Filtered() instead. +func All(paths []string) (*Result, error) { + return NewFileLoader().Filtered(paths, nil) +} + +// Filtered returns a Result object loaded (recursively) from the specified +// paths while applying the given filters. If any filter returns true, the +// file/directory is excluded. +// Deprecated: Use FileLoader.Filtered() instead. +func Filtered(paths []string, filter Filter) (*Result, error) { + return NewFileLoader().Filtered(paths, filter) +} + +// AsBundle loads a path as a bundle. If it is a single file +// it will be treated as a normal tarball bundle. If a directory +// is supplied it will be loaded as an unzipped bundle tree. +// Deprecated: Use FileLoader.AsBundle() instead. +func AsBundle(path string) (*bundle.Bundle, error) { + return NewFileLoader().AsBundle(path) +} + +// AllRegos returns a Result object loaded (recursively) with all Rego source +// files from the specified paths. 
+func AllRegos(paths []string) (*Result, error) { + return NewFileLoader().Filtered(paths, func(_ string, info os.FileInfo, _ int) bool { + return !info.IsDir() && !strings.HasSuffix(info.Name(), bundle.RegoExt) + }) +} + +// Rego is deprecated. Use RegoWithOpts instead. +func Rego(path string) (*RegoFile, error) { + return RegoWithOpts(path, ast.ParserOptions{}) +} + +// RegoWithOpts returns a RegoFile object loaded from the given path. +func RegoWithOpts(path string, opts ast.ParserOptions) (*RegoFile, error) { + path, err := fileurl.Clean(path) + if err != nil { + return nil, err + } + bs, err := os.ReadFile(path) + if err != nil { + return nil, err + } + return loadRego(path, bs, metrics.New(), opts) +} + +// CleanPath returns the normalized version of a path that can be used as an identifier. +func CleanPath(path string) string { + return strings.Trim(path, "/") +} + +// Paths returns a sorted list of files contained at path. If recurse is true +// and path is a directory, then Paths will walk the directory structure +// recursively and list files at each level. +func Paths(path string, recurse bool) (paths []string, err error) { + path, err = fileurl.Clean(path) + if err != nil { + return nil, err + } + err = filepath.Walk(path, func(f string, _ os.FileInfo, _ error) error { + if !recurse { + if path != f && path != filepath.Dir(f) { + return filepath.SkipDir + } + } + paths = append(paths, f) + return nil + }) + return paths, err +} + +// Dirs resolves filepaths to directories. It will return a list of unique +// directories. +func Dirs(paths []string) []string { + unique := map[string]struct{}{} + + for _, path := range paths { + // TODO: /dir/dir will register top level directory /dir + dir := filepath.Dir(path) + unique[dir] = struct{}{} + } + + return util.KeysSorted(unique) +} + +// SplitPrefix returns a tuple specifying the document prefix and the file +// path. +func SplitPrefix(path string) ([]string, string) { + // Non-prefixed URLs can be returned without modification and their contents + // can be rooted directly under data. 
+ if strings.Index(path, "://") == strings.Index(path, ":") { + return nil, path + } + parts := strings.SplitN(path, ":", 2) + if len(parts) == 2 && len(parts[0]) > 0 { + return strings.Split(parts[0], "."), parts[1] + } + return nil, path +} + +func (l *Result) merge(path string, result interface{}) error { + switch result := result.(type) { + case bundle.Bundle: + for _, module := range result.Modules { + l.Modules[module.Path] = &RegoFile{ + Name: module.Path, + Parsed: module.Parsed, + Raw: module.Raw, + } + } + return l.mergeDocument(path, result.Data) + case *RegoFile: + l.Modules[CleanPath(path)] = result + return nil + default: + return l.mergeDocument(path, result) + } +} + +func (l *Result) mergeDocument(path string, doc interface{}) error { + obj, ok := makeDir(l.path, doc) + if !ok { + return unsupportedDocumentType(path) + } + merged, ok := merge.InterfaceMaps(l.Documents, obj) + if !ok { + return mergeError(path) + } + for k := range merged { + l.Documents[k] = merged[k] + } + return nil +} + +func (l *Result) withParent(p string) *Result { + path := append(l.path, p) + return &Result{ + Documents: l.Documents, + Modules: l.Modules, + path: path, + } +} + +func newResult() *Result { + return &Result{ + Documents: map[string]interface{}{}, + Modules: map[string]*RegoFile{}, + } +} + +func all(fsys fs.FS, paths []string, filter Filter, f func(*Result, string, int) error) (*Result, error) { + errs := Errors{} + root := newResult() + + for _, path := range paths { + + // Paths can be prefixed with a string that specifies where content should be + // loaded under data. E.g., foo.bar:/path/to/some.json will load the content + // of some.json under {"foo": {"bar": ...}}. + loaded := root + prefix, path := SplitPrefix(path) + if len(prefix) > 0 { + for _, part := range prefix { + loaded = loaded.withParent(part) + } + } + + allRec(fsys, path, filter, &errs, loaded, 0, f) + } + + if len(errs) > 0 { + return nil, errs + } + + return root, nil +} + +func allRec(fsys fs.FS, path string, filter Filter, errors *Errors, loaded *Result, depth int, f func(*Result, string, int) error) { + + path, err := fileurl.Clean(path) + if err != nil { + errors.add(err) + return + } + + if err := checkForUNCPath(path); err != nil { + errors.add(err) + return + } + + var info fs.FileInfo + if fsys != nil { + info, err = fs.Stat(fsys, path) + } else { + info, err = os.Stat(path) + } + + if err != nil { + errors.add(err) + return + } + + if filter != nil && filter(path, info, depth) { + return + } + + if !info.IsDir() { + if err := f(loaded, path, depth); err != nil { + errors.add(err) + } + return + } + + // If we are recursing on directories then content must be loaded under path + // specified by directory hierarchy. 
+ if depth > 0 { + loaded = loaded.withParent(info.Name()) + } + + var files []fs.DirEntry + if fsys != nil { + files, err = fs.ReadDir(fsys, path) + } else { + files, err = os.ReadDir(path) + } + if err != nil { + errors.add(err) + return + } + + for _, file := range files { + allRec(fsys, filepath.Join(path, file.Name()), filter, errors, loaded, depth+1, f) + } +} + +func loadKnownTypes(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (interface{}, error) { + switch filepath.Ext(path) { + case ".json": + return loadJSON(path, bs, m) + case ".rego": + return loadRego(path, bs, m, opts) + case ".yaml", ".yml": + return loadYAML(path, bs, m) + default: + if strings.HasSuffix(path, ".tar.gz") { + r, err := loadBundleFile(path, bs, m, opts) + if err != nil { + err = fmt.Errorf("bundle %s: %w", path, err) + } + return r, err + } + } + return nil, unrecognizedFile(path) +} + +func loadFileForAnyType(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (interface{}, error) { + module, err := loadRego(path, bs, m, opts) + if err == nil { + return module, nil + } + doc, err := loadJSON(path, bs, m) + if err == nil { + return doc, nil + } + doc, err = loadYAML(path, bs, m) + if err == nil { + return doc, nil + } + return nil, unrecognizedFile(path) +} + +func loadBundleFile(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (bundle.Bundle, error) { + tl := bundle.NewTarballLoaderWithBaseURL(bytes.NewBuffer(bs), path) + br := bundle.NewCustomReader(tl). + WithRegoVersion(opts.RegoVersion). + WithCapabilities(opts.Capabilities). + WithProcessAnnotations(opts.ProcessAnnotation). + WithMetrics(m). + WithSkipBundleVerification(true). + IncludeManifestInData(true) + return br.Read() +} + +func loadRego(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (*RegoFile, error) { + m.Timer(metrics.RegoModuleParse).Start() + var module *ast.Module + var err error + module, err = ast.ParseModuleWithOpts(path, string(bs), opts) + m.Timer(metrics.RegoModuleParse).Stop() + if err != nil { + return nil, err + } + result := &RegoFile{ + Name: path, + Parsed: module, + Raw: bs, + } + return result, nil +} + +func loadJSON(path string, bs []byte, m metrics.Metrics) (interface{}, error) { + m.Timer(metrics.RegoDataParse).Start() + var x interface{} + err := util.UnmarshalJSON(bs, &x) + m.Timer(metrics.RegoDataParse).Stop() + + if err != nil { + return nil, fmt.Errorf("%s: %w", path, err) + } + return x, nil +} + +func loadYAML(path string, bs []byte, m metrics.Metrics) (interface{}, error) { + m.Timer(metrics.RegoDataParse).Start() + bs, err := yaml.YAMLToJSON(bs) + m.Timer(metrics.RegoDataParse).Stop() + if err != nil { + return nil, fmt.Errorf("%v: error converting YAML to JSON: %v", path, err) + } + return loadJSON(path, bs, m) +} + +func makeDir(path []string, x interface{}) (map[string]interface{}, bool) { + if len(path) == 0 { + obj, ok := x.(map[string]interface{}) + if !ok { + return nil, false + } + return obj, true + } + return makeDir(path[:len(path)-1], map[string]interface{}{path[len(path)-1]: x}) +} + +// isUNC reports whether path is a UNC path. 
+func isUNC(path string) bool { + return len(path) > 1 && isSlash(path[0]) && isSlash(path[1]) +} + +func isSlash(c uint8) bool { + return c == '\\' || c == '/' +} + +func checkForUNCPath(path string) error { + if isUNC(path) { + return fmt.Errorf("UNC path read is not allowed: %s", path) + } + return nil +} diff --git a/vendor/github.com/open-policy-agent/opa/logging/logging.go b/vendor/github.com/open-policy-agent/opa/v1/logging/logging.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/logging/logging.go rename to vendor/github.com/open-policy-agent/opa/v1/logging/logging.go diff --git a/vendor/github.com/open-policy-agent/opa/metrics/metrics.go b/vendor/github.com/open-policy-agent/opa/v1/metrics/metrics.go similarity index 97% rename from vendor/github.com/open-policy-agent/opa/metrics/metrics.go rename to vendor/github.com/open-policy-agent/opa/v1/metrics/metrics.go index 53cd606a363..f1038e8bcb9 100644 --- a/vendor/github.com/open-policy-agent/opa/metrics/metrics.go +++ b/vendor/github.com/open-policy-agent/opa/v1/metrics/metrics.go @@ -8,7 +8,7 @@ package metrics import ( "encoding/json" "fmt" - "sort" + "slices" "strings" "sync" "sync/atomic" @@ -94,8 +94,8 @@ func (m *metrics) String() string { }) } - sort.Slice(sorted, func(i, j int) bool { - return sorted[i].Key < sorted[j].Key + slices.SortFunc(sorted, func(a, b metric) int { + return strings.Compare(a.Key, b.Key) }) buf := make([]string, len(sorted)) @@ -178,7 +178,7 @@ func (m *metrics) Clear() { m.counters = map[string]Counter{} } -func (m *metrics) formatKey(name string, metrics interface{}) string { +func (*metrics) formatKey(name string, metrics interface{}) string { switch metrics.(type) { case Timer: return "timer_" + name + "_ns" diff --git a/vendor/github.com/open-policy-agent/opa/plugins/plugins.go b/vendor/github.com/open-policy-agent/opa/v1/plugins/plugins.go similarity index 94% rename from vendor/github.com/open-policy-agent/opa/plugins/plugins.go rename to vendor/github.com/open-policy-agent/opa/v1/plugins/plugins.go index 567acfb817a..7e8b900bfcf 100644 --- a/vendor/github.com/open-policy-agent/opa/plugins/plugins.go +++ b/vendor/github.com/open-policy-agent/opa/v1/plugins/plugins.go @@ -19,22 +19,22 @@ import ( "github.com/gorilla/mux" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/bundle" - "github.com/open-policy-agent/opa/config" - "github.com/open-policy-agent/opa/hooks" bundleUtils "github.com/open-policy-agent/opa/internal/bundle" cfg "github.com/open-policy-agent/opa/internal/config" initload "github.com/open-policy-agent/opa/internal/runtime/init" - "github.com/open-policy-agent/opa/keys" - "github.com/open-policy-agent/opa/loader" - "github.com/open-policy-agent/opa/logging" - "github.com/open-policy-agent/opa/plugins/rest" - "github.com/open-policy-agent/opa/resolver/wasm" - "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/topdown/cache" - "github.com/open-policy-agent/opa/topdown/print" - "github.com/open-policy-agent/opa/tracing" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/bundle" + "github.com/open-policy-agent/opa/v1/config" + "github.com/open-policy-agent/opa/v1/hooks" + "github.com/open-policy-agent/opa/v1/keys" + "github.com/open-policy-agent/opa/v1/loader" + "github.com/open-policy-agent/opa/v1/logging" + "github.com/open-policy-agent/opa/v1/plugins/rest" + "github.com/open-policy-agent/opa/v1/resolver/wasm" + "github.com/open-policy-agent/opa/v1/storage" + 
"github.com/open-policy-agent/opa/v1/topdown/cache" + "github.com/open-policy-agent/opa/v1/topdown/print" + "github.com/open-policy-agent/opa/v1/tracing" ) // Factory defines the interface OPA uses to instantiate your plugin. @@ -163,6 +163,14 @@ func (s *Status) String() string { return fmt.Sprintf("{%v %q}", s.State, s.Message) } +func (s *Status) Equal(other *Status) bool { + if s == nil || other == nil { + return s == nil && other == nil + } + + return s.State == other.State && s.Message == other.Message +} + // StatusListener defines a handler to register for status updates. type StatusListener func(status map[string]*Status) @@ -440,6 +448,11 @@ func New(raw []byte, id string, store storage.Store, opts ...func(*Manager)) (*M f(m) } + if m.parserOptions.RegoVersion == ast.RegoUndefined { + // Default to v1 if rego-version is not set through options + m.parserOptions.RegoVersion = ast.DefaultRegoVersion + } + if m.logger == nil { m.logger = logging.Get() } @@ -472,13 +485,7 @@ func New(raw []byte, id string, store storage.Store, opts ...func(*Manager)) (*M return nil, err } - serviceOpts := cfg.ServiceOptions{ - Raw: parsedConfig.Services, - AuthPlugin: m.AuthPlugin, - Keys: m.keys, - Logger: m.logger, - DistributedTacingOpts: m.distributedTacingOpts, - } + serviceOpts := m.DefaultServiceOpts(parsedConfig) m.services, err = cfg.ParseServicesConfig(serviceOpts) if err != nil { @@ -494,8 +501,8 @@ func New(raw []byte, id string, store storage.Store, opts ...func(*Manager)) (*M m.reporter.RegisterGatherer("min_compatible_version", func(_ context.Context) (any, error) { var minimumCompatibleVersion string - if m.compiler != nil && m.compiler.Required != nil { - minimumCompatibleVersion, _ = m.compiler.Required.MinimumCompatibleVersion() + if c := m.GetCompiler(); c != nil && c.Required != nil { + minimumCompatibleVersion, _ = c.Required.MinimumCompatibleVersion() } return minimumCompatibleVersion, nil }) @@ -537,6 +544,7 @@ func (m *Manager) Init(ctx context.Context) error { Bundles: m.initBundles, MaxErrors: m.maxErrors, EnablePrintStatements: m.enablePrintStatements, + ParserOptions: m.parserOptions, }) if err != nil { @@ -746,14 +754,19 @@ func (m *Manager) Stop(ctx context.Context) { } } -// Reconfigure updates the configuration on the manager. -func (m *Manager) Reconfigure(config *config.Config) error { - opts := cfg.ServiceOptions{ +func (m *Manager) DefaultServiceOpts(config *config.Config) cfg.ServiceOptions { + return cfg.ServiceOptions{ Raw: config.Services, AuthPlugin: m.AuthPlugin, Logger: m.logger, + Keys: m.keys, DistributedTacingOpts: m.distributedTacingOpts, } +} + +// Reconfigure updates the configuration on the manager. +func (m *Manager) Reconfigure(config *config.Config) error { + opts := m.DefaultServiceOpts(config) keys, err := keys.ParseKeysConfig(config.Keys) if err != nil { @@ -790,7 +803,7 @@ func (m *Manager) Reconfigure(config *config.Config) error { m.Config = config m.interQueryBuiltinCacheConfig = interQueryBuiltinCacheConfig - for name, client := range services { + for name, client := range services { //nolint:gocritic m.services[name] = client } @@ -939,7 +952,13 @@ func loadCompilerFromStore(ctx context.Context, store storage.Store, txn storage modules[policy] = module } - compiler := ast.NewCompiler().WithEnablePrintStatements(enablePrintStatements) + compiler := ast.NewCompiler(). 
+ WithEnablePrintStatements(enablePrintStatements) + + if popts.RegoVersion != ast.RegoUndefined { + compiler = compiler.WithDefaultRegoVersion(popts.RegoVersion) + } + compiler.Compile(modules) return compiler, nil } diff --git a/vendor/github.com/open-policy-agent/opa/plugins/rest/auth.go b/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/auth.go similarity index 96% rename from vendor/github.com/open-policy-agent/opa/plugins/rest/auth.go rename to vendor/github.com/open-policy-agent/opa/v1/plugins/rest/auth.go index 11e72001a21..7ef9bf7dfd9 100644 --- a/vendor/github.com/open-policy-agent/opa/plugins/rest/auth.go +++ b/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/auth.go @@ -33,8 +33,8 @@ import ( "github.com/open-policy-agent/opa/internal/jwx/jws/sign" "github.com/open-policy-agent/opa/internal/providers/aws" "github.com/open-policy-agent/opa/internal/uuid" - "github.com/open-policy-agent/opa/keys" - "github.com/open-policy-agent/opa/logging" + "github.com/open-policy-agent/opa/v1/keys" + "github.com/open-policy-agent/opa/v1/logging" ) const ( @@ -126,10 +126,14 @@ type bearerAuthPlugin struct { // encode is set to true for the OCIDownloader because // it expects tokens in plain text but needs them in base64. encode bool + logger logging.Logger } func (ap *bearerAuthPlugin) NewClient(c Config) (*http.Client, error) { t, err := DefaultTLSConfig(c) + + ap.logger = c.logger + if err != nil { return nil, err } @@ -153,6 +157,9 @@ func (ap *bearerAuthPlugin) NewClient(c Config) (*http.Client, error) { func (ap *bearerAuthPlugin) Prepare(req *http.Request) error { token := ap.Token + if ap.logger == nil { + ap.logger = logging.Get() + } if ap.TokenPath != "" { bytes, err := os.ReadFile(ap.TokenPath) @@ -166,7 +173,12 @@ func (ap *bearerAuthPlugin) Prepare(req *http.Request) error { token = base64.StdEncoding.EncodeToString([]byte(token)) } - req.Header.Add("Authorization", fmt.Sprintf("%v %v", ap.Scheme, token)) + if req.Response != nil && (req.Response.StatusCode == http.StatusPermanentRedirect || req.Response.StatusCode == http.StatusTemporaryRedirect) { + ap.logger.Debug("not attaching authorization header as the response contains a redirect") + } else { + ap.logger.Debug("attaching authorization header") + req.Header.Add("Authorization", fmt.Sprintf("%v %v", ap.Scheme, token)) + } return nil } @@ -194,7 +206,7 @@ func convertSignatureToBase64(alg string, der []byte) (string, error) { return signatureData, nil } -func pointsFromDER(der []byte) (R, S *big.Int, err error) { +func pointsFromDER(der []byte) (R, S *big.Int, err error) { //nolint:gocritic R, S = &big.Int{}, &big.Int{} data := asn1.RawValue{} if _, err := asn1.Unmarshal(der, &data); err != nil { @@ -364,7 +376,7 @@ func (ap *oauth2ClientCredentialsAuthPlugin) createAuthJWT(ctx context.Context, return &jwt, nil } -func (ap *oauth2ClientCredentialsAuthPlugin) mapKMSAlgToSign(alg string) (string, error) { +func (*oauth2ClientCredentialsAuthPlugin) mapKMSAlgToSign(alg string) (string, error) { switch alg { case "ECDSA_SHA_256": return "ES256", nil @@ -382,12 +394,7 @@ func (ap *oauth2ClientCredentialsAuthPlugin) SignWithKMS(ctx context.Context, pa encodedHdr := base64.RawURLEncoding.EncodeToString(hdrBuf) encodedPayload := base64.RawURLEncoding.EncodeToString(payload) - input := strings.Join( - []string{ - encodedHdr, - encodedPayload, - }, ".", - ) + input := encodedHdr + "." 
+ encodedPayload digest, err := messageDigest([]byte(input), ap.AWSKmsKey.Algorithm) if err != nil { return nil, err @@ -616,7 +623,7 @@ func (ap *oauth2ClientCredentialsAuthPlugin) requestToken(ctx context.Context) ( return nil, err } - if strings.ToLower(tokenResponse.TokenType) != "bearer" { + if !strings.EqualFold(tokenResponse.TokenType, "bearer") { return nil, errors.New("unknown token type returned from token endpoint") } @@ -751,7 +758,7 @@ func (ap *clientTLSAuthPlugin) NewClient(c Config) (*http.Client, error) { return client, nil } -func (ap *clientTLSAuthPlugin) Prepare(_ *http.Request) error { +func (*clientTLSAuthPlugin) Prepare(_ *http.Request) error { return nil } diff --git a/vendor/github.com/open-policy-agent/opa/plugins/rest/aws.go b/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/aws.go similarity index 99% rename from vendor/github.com/open-policy-agent/opa/plugins/rest/aws.go rename to vendor/github.com/open-policy-agent/opa/v1/plugins/rest/aws.go index cc45dfa9c7c..a610a8014b6 100644 --- a/vendor/github.com/open-policy-agent/opa/plugins/rest/aws.go +++ b/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/aws.go @@ -19,7 +19,7 @@ import ( "github.com/go-ini/ini" "github.com/open-policy-agent/opa/internal/providers/aws" - "github.com/open-policy-agent/opa/logging" + "github.com/open-policy-agent/opa/v1/logging" ) const ( @@ -69,7 +69,7 @@ type awsEnvironmentCredentialService struct { logger logging.Logger } -func (cs *awsEnvironmentCredentialService) credentials(context.Context) (aws.Credentials, error) { +func (*awsEnvironmentCredentialService) credentials(context.Context) (aws.Credentials, error) { var creds aws.Credentials creds.AccessKey = os.Getenv(accessKeyEnvVar) if creds.AccessKey == "" { @@ -678,7 +678,7 @@ func (ap *ecrAuthPlugin) Prepare(r *http.Request) error { ap.logger.Debug("Signing request with ECR authorization token") - r.Header.Set("Authorization", fmt.Sprintf("Basic %s", ap.token.AuthorizationToken)) + r.Header.Set("Authorization", "Basic "+ap.token.AuthorizationToken) return nil } diff --git a/vendor/github.com/open-policy-agent/opa/plugins/rest/azure.go b/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/azure.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/plugins/rest/azure.go rename to vendor/github.com/open-policy-agent/opa/v1/plugins/rest/azure.go diff --git a/vendor/github.com/open-policy-agent/opa/plugins/rest/gcp.go b/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/gcp.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/plugins/rest/gcp.go rename to vendor/github.com/open-policy-agent/opa/v1/plugins/rest/gcp.go diff --git a/vendor/github.com/open-policy-agent/opa/plugins/rest/rest.go b/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/rest.go similarity index 94% rename from vendor/github.com/open-policy-agent/opa/plugins/rest/rest.go rename to vendor/github.com/open-policy-agent/opa/v1/plugins/rest/rest.go index fd59058ca1e..e5d8e0f0d68 100644 --- a/vendor/github.com/open-policy-agent/opa/plugins/rest/rest.go +++ b/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/rest.go @@ -12,16 +12,17 @@ import ( "errors" "fmt" "io" + "maps" "net/http" "net/http/httputil" "reflect" "strings" "github.com/open-policy-agent/opa/internal/version" - "github.com/open-policy-agent/opa/keys" - "github.com/open-policy-agent/opa/logging" - "github.com/open-policy-agent/opa/tracing" - "github.com/open-policy-agent/opa/util" + "github.com/open-policy-agent/opa/v1/keys" + 
"github.com/open-policy-agent/opa/v1/logging" + "github.com/open-policy-agent/opa/v1/tracing" + "github.com/open-policy-agent/opa/v1/util" ) const ( @@ -94,7 +95,7 @@ func (c *Config) AuthPlugin(lookup AuthPluginLookupFunc) (HTTPAuthPlugin, error) } // reflection avoids need for this code to change as auth plugins are added s := reflect.ValueOf(c.Credentials) - for i := 0; i < s.NumField(); i++ { + for i := range s.NumField() { if s.Field(i).IsNil() { continue } @@ -293,7 +294,7 @@ func (c Client) Do(ctx context.Context, method, path string) (*http.Response, er } url := c.config.URL + "/" + path - req, err := http.NewRequest(method, url, body) + req, err := http.NewRequestWithContext(ctx, method, url, body) if err != nil { return nil, err } @@ -303,23 +304,16 @@ func (c Client) Do(ctx context.Context, method, path string) (*http.Response, er } // Copy custom headers from config. - for key, value := range c.config.Headers { - headers[key] = value - } + maps.Copy(headers, c.config.Headers) // Overwrite with headers set directly on client. - for key, value := range c.headers { - headers[key] = value - } + maps.Copy(headers, c.headers) for key, value := range headers { req.Header.Add(key, value) } - req = req.WithContext(ctx) - - err = c.config.authPrepare(req, c.authPluginLookup) - if err != nil { + if err = c.config.authPrepare(req, c.authPluginLookup); err != nil { return nil, err } @@ -347,7 +341,7 @@ func (c Client) Do(ctx context.Context, method, path string) (*http.Response, er return nil, err } - if len(string(dump)) < defaultResponseSizeLimitBytes { + if len(dump) < defaultResponseSizeLimitBytes { c.loggerFields["response"] = string(dump) } else { c.loggerFields["response"] = fmt.Sprintf("%v...", string(dump[:defaultResponseSizeLimitBytes])) diff --git a/vendor/github.com/open-policy-agent/opa/v1/rego/errors.go b/vendor/github.com/open-policy-agent/opa/v1/rego/errors.go new file mode 100644 index 00000000000..dcc5e2679d1 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/rego/errors.go @@ -0,0 +1,24 @@ +package rego + +// HaltError is an error type to return from a custom function implementation +// that will abort the evaluation process (analogous to topdown.Halt). +type HaltError struct { + err error +} + +// Error delegates to the wrapped error +func (h *HaltError) Error() string { + return h.err.Error() +} + +// NewHaltError wraps an error such that the evaluation process will stop +// when it occurs. +func NewHaltError(err error) error { + return &HaltError{err: err} +} + +// ErrorDetails interface is satisfied by an error that provides further +// details. +type ErrorDetails interface { + Lines() []string +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/rego/plugins.go b/vendor/github.com/open-policy-agent/opa/v1/rego/plugins.go new file mode 100644 index 00000000000..55b5ed7803a --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/rego/plugins.go @@ -0,0 +1,43 @@ +// Copyright 2023 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package rego + +import ( + "context" + "sync" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/ir" +) + +var targetPlugins = map[string]TargetPlugin{} +var pluginMtx sync.Mutex + +type TargetPlugin interface { + IsTarget(string) bool + PrepareForEval(context.Context, *ir.Policy, ...PrepareOption) (TargetPluginEval, error) +} + +type TargetPluginEval interface { + Eval(context.Context, *EvalContext, ast.Value) (ast.Value, error) +} + +func (*Rego) targetPlugin(tgt string) TargetPlugin { + for _, p := range targetPlugins { + if p.IsTarget(tgt) { + return p + } + } + return nil +} + +func RegisterPlugin(name string, p TargetPlugin) { + pluginMtx.Lock() + defer pluginMtx.Unlock() + if _, ok := targetPlugins[name]; ok { + panic("plugin already registered " + name) + } + targetPlugins[name] = p +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/rego/rego.go b/vendor/github.com/open-policy-agent/opa/v1/rego/rego.go new file mode 100644 index 00000000000..fae39273af8 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/rego/rego.go @@ -0,0 +1,2944 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package rego exposes high level APIs for evaluating Rego policies. +package rego + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "maps" + "strings" + "time" + + bundleUtils "github.com/open-policy-agent/opa/internal/bundle" + "github.com/open-policy-agent/opa/internal/compiler/wasm" + "github.com/open-policy-agent/opa/internal/future" + "github.com/open-policy-agent/opa/internal/planner" + "github.com/open-policy-agent/opa/internal/rego/opa" + "github.com/open-policy-agent/opa/internal/wasm/encoding" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/bundle" + "github.com/open-policy-agent/opa/v1/ir" + "github.com/open-policy-agent/opa/v1/loader" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/plugins" + "github.com/open-policy-agent/opa/v1/resolver" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/storage/inmem" + "github.com/open-policy-agent/opa/v1/topdown" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/cache" + "github.com/open-policy-agent/opa/v1/topdown/print" + "github.com/open-policy-agent/opa/v1/tracing" + "github.com/open-policy-agent/opa/v1/types" + "github.com/open-policy-agent/opa/v1/util" +) + +const ( + defaultPartialNamespace = "partial" + wasmVarPrefix = "^" +) + +// nolint: deadcode,varcheck +const ( + targetWasm = "wasm" + targetRego = "rego" +) + +// CompileResult represents the result of compiling a Rego query, zero or more +// Rego modules, and arbitrary contextual data into an executable. +type CompileResult struct { + Bytes []byte `json:"bytes"` +} + +// PartialQueries contains the queries and support modules produced by partial +// evaluation. +type PartialQueries struct { + Queries []ast.Body `json:"queries,omitempty"` + Support []*ast.Module `json:"modules,omitempty"` +} + +// PartialResult represents the result of partial evaluation. The result can be +// used to generate a new query that can be run when inputs are known. 
+type PartialResult struct {
+	compiler     *ast.Compiler
+	store        storage.Store
+	body         ast.Body
+	builtinDecls map[string]*ast.Builtin
+	builtinFuncs map[string]*topdown.Builtin
+}
+
+// Rego returns an object that can be evaluated to produce a query result.
+func (pr PartialResult) Rego(options ...func(*Rego)) *Rego {
+	options = append(options, Compiler(pr.compiler), Store(pr.store), ParsedQuery(pr.body))
+	r := New(options...)
+
+	// Propagate any custom builtins.
+	maps.Copy(r.builtinDecls, pr.builtinDecls)
+	maps.Copy(r.builtinFuncs, pr.builtinFuncs)
+	return r
+}
+
+// preparedQuery is a wrapper around a Rego object which has pre-processed
+// state stored on it. Once prepared, there is a more limited set of actions
+// that can be taken with it. It will, however, be able to evaluate faster since
+// it will not have to re-parse or compile as much.
+type preparedQuery struct {
+	r   *Rego
+	cfg *PrepareConfig
+}
+
+// EvalContext defines the set of options allowed to be set at evaluation
+// time. Any other options will need to be set on a new Rego object.
+type EvalContext struct {
+	hasInput                    bool
+	time                        time.Time
+	seed                        io.Reader
+	rawInput                    *interface{}
+	parsedInput                 ast.Value
+	metrics                     metrics.Metrics
+	txn                         storage.Transaction
+	instrument                  bool
+	instrumentation             *topdown.Instrumentation
+	partialNamespace            string
+	queryTracers                []topdown.QueryTracer
+	compiledQuery               compiledQuery
+	unknowns                    []string
+	disableInlining             []ast.Ref
+	nondeterministicBuiltins    bool
+	parsedUnknowns              []*ast.Term
+	indexing                    bool
+	earlyExit                   bool
+	interQueryBuiltinCache      cache.InterQueryCache
+	interQueryBuiltinValueCache cache.InterQueryValueCache
+	ndBuiltinCache              builtins.NDBCache
+	resolvers                   []refResolver
+	httpRoundTripper            topdown.CustomizeRoundTripper
+	sortSets                    bool
+	copyMaps                    bool
+	printHook                   print.Hook
+	capabilities                *ast.Capabilities
+	strictBuiltinErrors         bool
+	virtualCache                topdown.VirtualCache
+	baseCache                   topdown.BaseCache
+}
+
+func (e *EvalContext) RawInput() *interface{} {
+	return e.rawInput
+}
+
+func (e *EvalContext) ParsedInput() ast.Value {
+	return e.parsedInput
+}
+
+func (e *EvalContext) Time() time.Time {
+	return e.time
+}
+
+func (e *EvalContext) Seed() io.Reader {
+	return e.seed
+}
+
+func (e *EvalContext) InterQueryBuiltinCache() cache.InterQueryCache {
+	return e.interQueryBuiltinCache
+}
+
+func (e *EvalContext) InterQueryBuiltinValueCache() cache.InterQueryValueCache {
+	return e.interQueryBuiltinValueCache
+}
+
+func (e *EvalContext) PrintHook() print.Hook {
+	return e.printHook
+}
+
+func (e *EvalContext) Metrics() metrics.Metrics {
+	return e.metrics
+}
+
+func (e *EvalContext) StrictBuiltinErrors() bool {
+	return e.strictBuiltinErrors
+}
+
+func (e *EvalContext) NDBCache() builtins.NDBCache {
+	return e.ndBuiltinCache
+}
+
+func (e *EvalContext) CompiledQuery() ast.Body {
+	return e.compiledQuery.query
+}
+
+func (e *EvalContext) Capabilities() *ast.Capabilities {
+	return e.capabilities
+}
+
+func (e *EvalContext) Transaction() storage.Transaction {
+	return e.txn
+}
+
+// EvalOption defines a function to set an option on an EvalContext
+type EvalOption func(*EvalContext)
+
+// EvalInput configures the input for a Prepared Query's evaluation
+func EvalInput(input interface{}) EvalOption {
+	return func(e *EvalContext) {
+		e.rawInput = &input
+		e.hasInput = true
+	}
+}
+
+// EvalParsedInput configures the input for a Prepared Query's evaluation
+func EvalParsedInput(input ast.Value) EvalOption {
+	return func(e *EvalContext) {
+		e.parsedInput = input
+		e.hasInput = true
+	}
+}
+
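+// As a sketch (hypothetical policy path and input), a query is prepared once
+// and then evaluated with per-call options such as EvalInput:
+//
+//	pq, err := rego.New(rego.Query("data.example.allow")).PrepareForEval(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	rs, err := pq.Eval(ctx, rego.EvalInput(map[string]interface{}{"user": "alice"}))
+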
+// EvalMetrics configures the metrics for a Prepared Query's evaluation +func EvalMetrics(metric metrics.Metrics) EvalOption { + return func(e *EvalContext) { + e.metrics = metric + } +} + +// EvalTransaction configures the Transaction for a Prepared Query's evaluation +func EvalTransaction(txn storage.Transaction) EvalOption { + return func(e *EvalContext) { + e.txn = txn + } +} + +// EvalInstrument enables or disables instrumenting for a Prepared Query's evaluation +func EvalInstrument(instrument bool) EvalOption { + return func(e *EvalContext) { + e.instrument = instrument + } +} + +// EvalTracer configures a tracer for a Prepared Query's evaluation +// Deprecated: Use EvalQueryTracer instead. +func EvalTracer(tracer topdown.Tracer) EvalOption { + return func(e *EvalContext) { + if tracer != nil { + e.queryTracers = append(e.queryTracers, topdown.WrapLegacyTracer(tracer)) + } + } +} + +// EvalQueryTracer configures a tracer for a Prepared Query's evaluation +func EvalQueryTracer(tracer topdown.QueryTracer) EvalOption { + return func(e *EvalContext) { + if tracer != nil { + e.queryTracers = append(e.queryTracers, tracer) + } + } +} + +// EvalPartialNamespace returns an argument that sets the namespace to use for +// partial evaluation results. The namespace must be a valid package path +// component. +func EvalPartialNamespace(ns string) EvalOption { + return func(e *EvalContext) { + e.partialNamespace = ns + } +} + +// EvalUnknowns returns an argument that sets the values to treat as +// unknown during partial evaluation. +func EvalUnknowns(unknowns []string) EvalOption { + return func(e *EvalContext) { + e.unknowns = unknowns + } +} + +// EvalDisableInlining returns an argument that adds a set of paths to exclude from +// partial evaluation inlining. +func EvalDisableInlining(paths []ast.Ref) EvalOption { + return func(e *EvalContext) { + e.disableInlining = paths + } +} + +// EvalParsedUnknowns returns an argument that sets the values to treat +// as unknown during partial evaluation. +func EvalParsedUnknowns(unknowns []*ast.Term) EvalOption { + return func(e *EvalContext) { + e.parsedUnknowns = unknowns + } +} + +// EvalRuleIndexing will disable indexing optimizations for the +// evaluation. This should only be used when tracing in debug mode. +func EvalRuleIndexing(enabled bool) EvalOption { + return func(e *EvalContext) { + e.indexing = enabled + } +} + +// EvalEarlyExit will disable 'early exit' optimizations for the +// evaluation. This should only be used when tracing in debug mode. +func EvalEarlyExit(enabled bool) EvalOption { + return func(e *EvalContext) { + e.earlyExit = enabled + } +} + +// EvalTime sets the wall clock time to use during policy evaluation. +// time.now_ns() calls will return this value. +func EvalTime(x time.Time) EvalOption { + return func(e *EvalContext) { + e.time = x + } +} + +// EvalSeed sets a reader that will seed randomization required by built-in functions. +// If a seed is not provided crypto/rand.Reader is used. +func EvalSeed(r io.Reader) EvalOption { + return func(e *EvalContext) { + e.seed = r + } +} + +// EvalInterQueryBuiltinCache sets the inter-query cache that built-in functions can utilize +// during evaluation. +func EvalInterQueryBuiltinCache(c cache.InterQueryCache) EvalOption { + return func(e *EvalContext) { + e.interQueryBuiltinCache = c + } +} + +// EvalInterQueryBuiltinValueCache sets the inter-query value cache that built-in functions can utilize +// during evaluation. 
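+//
+// For example (hypothetical; valueCache is assumed to have been constructed
+// via the cache package and shared across queries):
+//
+//	rs, err := pq.Eval(ctx, rego.EvalInterQueryBuiltinValueCache(valueCache))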
+func EvalInterQueryBuiltinValueCache(c cache.InterQueryValueCache) EvalOption {
+	return func(e *EvalContext) {
+		e.interQueryBuiltinValueCache = c
+	}
+}
+
+// EvalNDBuiltinCache sets the non-deterministic builtin cache that built-in functions can
+// use during evaluation.
+func EvalNDBuiltinCache(c builtins.NDBCache) EvalOption {
+	return func(e *EvalContext) {
+		e.ndBuiltinCache = c
+	}
+}
+
+// EvalResolver sets a Resolver for a specified ref path for this evaluation.
+func EvalResolver(ref ast.Ref, r resolver.Resolver) EvalOption {
+	return func(e *EvalContext) {
+		e.resolvers = append(e.resolvers, refResolver{ref, r})
+	}
+}
+
+// EvalHTTPRoundTripper allows customizing the http.RoundTripper for this evaluation.
+func EvalHTTPRoundTripper(t topdown.CustomizeRoundTripper) EvalOption {
+	return func(e *EvalContext) {
+		e.httpRoundTripper = t
+	}
+}
+
+// EvalSortSets causes the evaluator to sort sets before returning them as JSON arrays.
+func EvalSortSets(yes bool) EvalOption {
+	return func(e *EvalContext) {
+		e.sortSets = yes
+	}
+}
+
+// EvalCopyMaps causes the evaluator to copy `map[string]interface{}`s before returning them.
+func EvalCopyMaps(yes bool) EvalOption {
+	return func(e *EvalContext) {
+		e.copyMaps = yes
+	}
+}
+
+// EvalPrintHook sets the object to use for handling print statement outputs.
+func EvalPrintHook(ph print.Hook) EvalOption {
+	return func(e *EvalContext) {
+		e.printHook = ph
+	}
+}
+
+// EvalVirtualCache sets the topdown.VirtualCache to use for evaluation.
+// This is optional, and if not set, the default cache is used.
+func EvalVirtualCache(vc topdown.VirtualCache) EvalOption {
+	return func(e *EvalContext) {
+		e.virtualCache = vc
+	}
+}
+
+// EvalBaseCache sets the topdown.BaseCache to use for evaluation.
+// This is optional, and if not set, the default cache is used.
+func EvalBaseCache(bc topdown.BaseCache) EvalOption {
+	return func(e *EvalContext) {
+		e.baseCache = bc
+	}
+}
+
+// EvalNondeterministicBuiltins causes non-deterministic builtins to be evaluated
+// during partial evaluation. This is needed to pull in external data, or validate
+// a JWT, during PE, so that the result informs what queries are returned.
+func EvalNondeterministicBuiltins(yes bool) EvalOption {
+	return func(e *EvalContext) {
+		e.nondeterministicBuiltins = yes
+	}
+}
+
+func (pq preparedQuery) Modules() map[string]*ast.Module {
+	mods := make(map[string]*ast.Module)
+
+	for name, mod := range pq.r.parsedModules {
+		mods[name] = mod
+	}
+
+	for _, b := range pq.r.bundles {
+		for _, mod := range b.Modules {
+			mods[mod.Path] = mod.Parsed
+		}
+	}
+
+	return mods
+}
+
+// newEvalContext creates a new EvalContext, overlaying any EvalOptions on top of
+// the Rego object on the preparedQuery. The returned function should be called
+// once the evaluation is complete to close any transactions that might have
+// been opened.
+func (pq preparedQuery) newEvalContext(ctx context.Context, options []EvalOption) (*EvalContext, func(context.Context), error) {
+	ectx := &EvalContext{
+		hasInput:                 false,
+		rawInput:                 nil,
+		parsedInput:              nil,
+		metrics:                  nil,
+		txn:                      nil,
+		instrument:               false,
+		instrumentation:          nil,
+		partialNamespace:         pq.r.partialNamespace,
+		queryTracers:             nil,
+		unknowns:                 pq.r.unknowns,
+		parsedUnknowns:           pq.r.parsedUnknowns,
+		nondeterministicBuiltins: pq.r.nondeterministicBuiltins,
+		compiledQuery:            compiledQuery{},
+		indexing:                 true,
+		earlyExit:                true,
+		resolvers:                pq.r.resolvers,
+		printHook:                pq.r.printHook,
+		capabilities:             pq.r.capabilities,
+		strictBuiltinErrors:      pq.r.strictBuiltinErrors,
+	}
+
+	for _, o := range options {
+		o(ectx)
+	}
+
+	if ectx.metrics == nil {
+		ectx.metrics = metrics.New()
+	}
+
+	if ectx.instrument {
+		ectx.instrumentation = topdown.NewInstrumentation(ectx.metrics)
+	}
+
+	// Default to an empty "finish" function
+	finishFunc := func(context.Context) {}
+
+	var err error
+	ectx.disableInlining, err = parseStringsToRefs(pq.r.disableInlining)
+	if err != nil {
+		return nil, finishFunc, err
+	}
+
+	if ectx.txn == nil {
+		ectx.txn, err = pq.r.store.NewTransaction(ctx)
+		if err != nil {
+			return nil, finishFunc, err
+		}
+		finishFunc = func(ctx context.Context) {
+			pq.r.store.Abort(ctx, ectx.txn)
+		}
+	}
+
+	// If we didn't get an input specified in the Eval options
+	// then fall back to the Rego object's input fields.
+	if !ectx.hasInput {
+		ectx.rawInput = pq.r.rawInput
+		ectx.parsedInput = pq.r.parsedInput
+	}
+
+	if ectx.parsedInput == nil {
+		if ectx.rawInput == nil {
+			// Fall back to the original Rego objects input if none was specified
+			// Note that it could still be nil
+			ectx.rawInput = pq.r.rawInput
+		}
+
+		if pq.r.targetPlugin(pq.r.target) == nil && // no plugin claims this target
+			pq.r.target != targetWasm {
+			ectx.parsedInput, err = pq.r.parseRawInput(ectx.rawInput, ectx.metrics)
+			if err != nil {
+				return nil, finishFunc, err
+			}
+		}
+	}
+
+	return ectx, finishFunc, nil
+}
+
+// PreparedEvalQuery holds the prepared Rego state that has been pre-processed
+// for subsequent evaluations.
+type PreparedEvalQuery struct {
+	preparedQuery
+}
+
+// Eval evaluates the prepared query with additional eval options and returns
+// a ResultSet.
+// If options are provided, they will override the corresponding values set on
+// the original Rego object.
+// The original Rego object transaction will *not* be re-used. A new transaction will be opened
+// if one is not provided with an EvalOption.
+func (pq PreparedEvalQuery) Eval(ctx context.Context, options ...EvalOption) (ResultSet, error) {
+	ectx, finish, err := pq.newEvalContext(ctx, options)
+	if err != nil {
+		return nil, err
+	}
+	defer finish(ctx)
+
+	ectx.compiledQuery = pq.r.compiledQueries[evalQueryType]
+
+	return pq.r.eval(ctx, ectx)
+}
+
+// PreparedPartialQuery holds the prepared Rego state that has been pre-processed
+// for partial evaluations.
+type PreparedPartialQuery struct {
+	preparedQuery
+}
+
+// Partial runs partial evaluation on the prepared query and returns the result.
+// The original Rego object transaction will *not* be re-used. A new transaction will be opened
+// if one is not provided with an EvalOption.
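+//
+// For example (hypothetical; error handling elided):
+//
+//	pq, err := r.PrepareForPartial(ctx)
+//	// ... handle err ...
+//	partials, err := pq.Partial(ctx, rego.EvalUnknowns([]string{"input"}))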
+func (pq PreparedPartialQuery) Partial(ctx context.Context, options ...EvalOption) (*PartialQueries, error) {
+	ectx, finish, err := pq.newEvalContext(ctx, options)
+	if err != nil {
+		return nil, err
+	}
+	defer finish(ctx)
+
+	ectx.compiledQuery = pq.r.compiledQueries[partialQueryType]
+
+	return pq.r.partial(ctx, ectx)
+}
+
+// Errors represents a collection of errors returned when evaluating Rego.
+type Errors []error
+
+func (errs Errors) Error() string {
+	if len(errs) == 0 {
+		return "no error"
+	}
+	if len(errs) == 1 {
+		return fmt.Sprintf("1 error occurred: %v", errs[0].Error())
+	}
+	buf := []string{fmt.Sprintf("%v errors occurred", len(errs))}
+	for _, err := range errs {
+		buf = append(buf, err.Error())
+	}
+	return strings.Join(buf, "\n")
+}
+
+var errPartialEvaluationNotEffective = errors.New("partial evaluation not effective")
+
+// IsPartialEvaluationNotEffectiveErr returns true if err is an error returned by
+// this package to indicate that partial evaluation was ineffective.
+func IsPartialEvaluationNotEffectiveErr(err error) bool {
+	errs, ok := err.(Errors)
+	if !ok {
+		return false
+	}
+	return len(errs) == 1 && errs[0] == errPartialEvaluationNotEffective
+}
+
+type compiledQuery struct {
+	query    ast.Body
+	compiler ast.QueryCompiler
+}
+
+type queryType int
+
+// Define a query type for each of the top-level Rego
+// APIs that compile queries differently.
+const (
+	evalQueryType queryType = iota
+	partialResultQueryType
+	partialQueryType
+	compileQueryType
+)
+
+type loadPaths struct {
+	paths  []string
+	filter loader.Filter
+}
+
+// Rego constructs a query and can be evaluated to obtain results.
+type Rego struct {
+	query                       string
+	parsedQuery                 ast.Body
+	compiledQueries             map[queryType]compiledQuery
+	pkg                         string
+	parsedPackage               *ast.Package
+	imports                     []string
+	parsedImports               []*ast.Import
+	rawInput                    *interface{}
+	parsedInput                 ast.Value
+	unknowns                    []string
+	parsedUnknowns              []*ast.Term
+	disableInlining             []string
+	shallowInlining             bool
+	nondeterministicBuiltins    bool
+	skipPartialNamespace        bool
+	partialNamespace            string
+	modules                     []rawModule
+	parsedModules               map[string]*ast.Module
+	compiler                    *ast.Compiler
+	store                       storage.Store
+	ownStore                    bool
+	ownStoreReadAst             bool
+	txn                         storage.Transaction
+	metrics                     metrics.Metrics
+	queryTracers                []topdown.QueryTracer
+	tracebuf                    *topdown.BufferTracer
+	trace                       bool
+	instrumentation             *topdown.Instrumentation
+	instrument                  bool
+	capture                     map[*ast.Expr]ast.Var // map exprs to generated capture vars
+	termVarID                   int
+	dump                        io.Writer
+	runtime                     *ast.Term
+	time                        time.Time
+	seed                        io.Reader
+	capabilities                *ast.Capabilities
+	builtinDecls                map[string]*ast.Builtin
+	builtinFuncs                map[string]*topdown.Builtin
+	unsafeBuiltins              map[string]struct{}
+	loadPaths                   loadPaths
+	bundlePaths                 []string
+	bundles                     map[string]*bundle.Bundle
+	skipBundleVerification      bool
+	interQueryBuiltinCache      cache.InterQueryCache
+	interQueryBuiltinValueCache cache.InterQueryValueCache
+	ndBuiltinCache              builtins.NDBCache
+	strictBuiltinErrors         bool
+	builtinErrorList            *[]topdown.Error
+	resolvers                   []refResolver
+	schemaSet                   *ast.SchemaSet
+	target                      string // target type (wasm, rego, etc.)
+	opa                   opa.EvalEngine
+	generateJSON          func(*ast.Term, *EvalContext) (interface{}, error)
+	printHook             print.Hook
+	enablePrintStatements bool
+	distributedTacingOpts tracing.Options
+	strict                bool
+	pluginMgr             *plugins.Manager
+	plugins               []TargetPlugin
+	targetPrepState       TargetPluginEval
+	regoVersion           ast.RegoVersion
+}
+
+func (r *Rego) RegoVersion() ast.RegoVersion {
+	return r.regoVersion
+}
+
+// Function represents a built-in function that is callable in Rego.
+type Function struct {
+	Name             string
+	Description      string
+	Decl             *types.Function
+	Memoize          bool
+	Nondeterministic bool
+}
+
+// BuiltinContext contains additional attributes from the evaluator that
+// built-in functions can use, e.g., the request context.Context, caches, etc.
+type BuiltinContext = topdown.BuiltinContext
+
+type (
+	// Builtin1 defines a built-in function that accepts 1 argument.
+	Builtin1 func(bctx BuiltinContext, op1 *ast.Term) (*ast.Term, error)
+
+	// Builtin2 defines a built-in function that accepts 2 arguments.
+	Builtin2 func(bctx BuiltinContext, op1, op2 *ast.Term) (*ast.Term, error)
+
+	// Builtin3 defines a built-in function that accepts 3 arguments.
+	Builtin3 func(bctx BuiltinContext, op1, op2, op3 *ast.Term) (*ast.Term, error)
+
+	// Builtin4 defines a built-in function that accepts 4 arguments.
+	Builtin4 func(bctx BuiltinContext, op1, op2, op3, op4 *ast.Term) (*ast.Term, error)
+
+	// BuiltinDyn defines a built-in function that accepts a list of arguments.
+	BuiltinDyn func(bctx BuiltinContext, terms []*ast.Term) (*ast.Term, error)
+)
+
+// RegisterBuiltin1 adds a built-in function globally inside the OPA runtime.
+func RegisterBuiltin1(decl *Function, impl Builtin1) {
+	ast.RegisterBuiltin(&ast.Builtin{
+		Name:             decl.Name,
+		Description:      decl.Description,
+		Decl:             decl.Decl,
+		Nondeterministic: decl.Nondeterministic,
+	})
+	topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
+		result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0]) })
+		return finishFunction(decl.Name, bctx, result, err, iter)
+	})
+}
+
+// RegisterBuiltin2 adds a built-in function globally inside the OPA runtime.
+func RegisterBuiltin2(decl *Function, impl Builtin2) {
+	ast.RegisterBuiltin(&ast.Builtin{
+		Name:             decl.Name,
+		Description:      decl.Description,
+		Decl:             decl.Decl,
+		Nondeterministic: decl.Nondeterministic,
+	})
+	topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
+		result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1]) })
+		return finishFunction(decl.Name, bctx, result, err, iter)
+	})
+}
+
+// RegisterBuiltin3 adds a built-in function globally inside the OPA runtime.
+func RegisterBuiltin3(decl *Function, impl Builtin3) {
+	ast.RegisterBuiltin(&ast.Builtin{
+		Name:             decl.Name,
+		Description:      decl.Description,
+		Decl:             decl.Decl,
+		Nondeterministic: decl.Nondeterministic,
+	})
+	topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
+		result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1], terms[2]) })
+		return finishFunction(decl.Name, bctx, result, err, iter)
+	})
+}
+
+// RegisterBuiltin4 adds a built-in function globally inside the OPA runtime.
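+// All RegisterBuiltinN variants follow the same pattern; as a sketch
+// (hypothetical builtin name and declaration):
+//
+//	rego.RegisterBuiltin1(&rego.Function{
+//		Name: "example.upper",
+//		Decl: types.NewFunction(types.Args(types.S), types.S),
+//	}, func(_ rego.BuiltinContext, op *ast.Term) (*ast.Term, error) {
+//		if s, ok := op.Value.(ast.String); ok {
+//			return ast.StringTerm(strings.ToUpper(string(s))), nil
+//		}
+//		return nil, nil // undefined for non-string operands
+//	})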
+func RegisterBuiltin4(decl *Function, impl Builtin4) { + ast.RegisterBuiltin(&ast.Builtin{ + Name: decl.Name, + Description: decl.Description, + Decl: decl.Decl, + Nondeterministic: decl.Nondeterministic, + }) + topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1], terms[2], terms[3]) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// RegisterBuiltinDyn adds a built-in function globally inside the OPA runtime. +func RegisterBuiltinDyn(decl *Function, impl BuiltinDyn) { + ast.RegisterBuiltin(&ast.Builtin{ + Name: decl.Name, + Description: decl.Description, + Decl: decl.Decl, + Nondeterministic: decl.Nondeterministic, + }) + topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// Function1 returns an option that adds a built-in function to the Rego object. +func Function1(decl *Function, f Builtin1) func(*Rego) { + return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0]) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// Function2 returns an option that adds a built-in function to the Rego object. +func Function2(decl *Function, f Builtin2) func(*Rego) { + return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1]) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// Function3 returns an option that adds a built-in function to the Rego object. +func Function3(decl *Function, f Builtin3) func(*Rego) { + return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1], terms[2]) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// Function4 returns an option that adds a built-in function to the Rego object. +func Function4(decl *Function, f Builtin4) func(*Rego) { + return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1], terms[2], terms[3]) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// FunctionDyn returns an option that adds a built-in function to the Rego object. +func FunctionDyn(decl *Function, f BuiltinDyn) func(*Rego) { + return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// FunctionDecl returns an option that adds a custom-built-in function +// __declaration__. NO implementation is provided. This is used for +// non-interpreter execution envs (e.g., Wasm). 
+func FunctionDecl(decl *Function) func(*Rego) { + return newDecl(decl) +} + +func newDecl(decl *Function) func(*Rego) { + return func(r *Rego) { + r.builtinDecls[decl.Name] = &ast.Builtin{ + Name: decl.Name, + Decl: decl.Decl, + } + } +} + +type memo struct { + term *ast.Term + err error +} + +type memokey string + +func memoize(decl *Function, bctx BuiltinContext, terms []*ast.Term, ifEmpty func() (*ast.Term, error)) (*ast.Term, error) { + + if !decl.Memoize { + return ifEmpty() + } + + // NOTE(tsandall): we assume memoization is applied to infrequent built-in + // calls that do things like fetch data from remote locations. As such, + // converting the terms to strings is acceptable for now. + var b strings.Builder + if _, err := b.WriteString(decl.Name); err != nil { + return nil, err + } + + // The term slice _may_ include an output term depending on how the caller + // referred to the built-in function. Only use the arguments as the cache + // key. Unification ensures we don't get false positive matches. + for i := range decl.Decl.Arity() { + if _, err := b.WriteString(terms[i].String()); err != nil { + return nil, err + } + } + + key := memokey(b.String()) + hit, ok := bctx.Cache.Get(key) + var m memo + if ok { + m = hit.(memo) + } else { + m.term, m.err = ifEmpty() + bctx.Cache.Put(key, m) + } + + return m.term, m.err +} + +// Dump returns an argument that sets the writer to dump debugging information to. +func Dump(w io.Writer) func(r *Rego) { + return func(r *Rego) { + r.dump = w + } +} + +// Query returns an argument that sets the Rego query. +func Query(q string) func(r *Rego) { + return func(r *Rego) { + r.query = q + } +} + +// ParsedQuery returns an argument that sets the Rego query. +func ParsedQuery(q ast.Body) func(r *Rego) { + return func(r *Rego) { + r.parsedQuery = q + } +} + +// Package returns an argument that sets the Rego package on the query's +// context. +func Package(p string) func(r *Rego) { + return func(r *Rego) { + r.pkg = p + } +} + +// ParsedPackage returns an argument that sets the Rego package on the query's +// context. +func ParsedPackage(pkg *ast.Package) func(r *Rego) { + return func(r *Rego) { + r.parsedPackage = pkg + } +} + +// Imports returns an argument that adds a Rego import to the query's context. +func Imports(p []string) func(r *Rego) { + return func(r *Rego) { + r.imports = append(r.imports, p...) + } +} + +// ParsedImports returns an argument that adds Rego imports to the query's +// context. +func ParsedImports(imp []*ast.Import) func(r *Rego) { + return func(r *Rego) { + r.parsedImports = append(r.parsedImports, imp...) + } +} + +// Input returns an argument that sets the Rego input document. Input should be +// a native Go value representing the input document. +func Input(x interface{}) func(r *Rego) { + return func(r *Rego) { + r.rawInput = &x + } +} + +// ParsedInput returns an argument that sets the Rego input document. +func ParsedInput(x ast.Value) func(r *Rego) { + return func(r *Rego) { + r.parsedInput = x + } +} + +// Unknowns returns an argument that sets the values to treat as unknown during +// partial evaluation. +func Unknowns(unknowns []string) func(r *Rego) { + return func(r *Rego) { + r.unknowns = unknowns + } +} + +// ParsedUnknowns returns an argument that sets the values to treat as unknown +// during partial evaluation. 
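+//
+// For example (hypothetical), marking the entire input document as unknown:
+//
+//	r := rego.New(
+//		rego.Query("data.example.allow == true"),
+//		rego.ParsedUnknowns([]*ast.Term{ast.MustParseTerm("input")}),
+//	)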
+func ParsedUnknowns(unknowns []*ast.Term) func(r *Rego) {
+	return func(r *Rego) {
+		r.parsedUnknowns = unknowns
+	}
+}
+
+// DisableInlining adds a set of paths to exclude from partial evaluation inlining.
+func DisableInlining(paths []string) func(r *Rego) {
+	return func(r *Rego) {
+		r.disableInlining = paths
+	}
+}
+
+// NondeterministicBuiltins causes non-deterministic builtins to be evaluated during
+// partial evaluation. This is needed to pull in external data, or validate a JWT,
+// during PE, so that the result informs what queries are returned.
+func NondeterministicBuiltins(yes bool) func(r *Rego) {
+	return func(r *Rego) {
+		r.nondeterministicBuiltins = yes
+	}
+}
+
+// ShallowInlining prevents rules that depend on unknown values from being inlined.
+// Rules that only depend on known values are inlined.
+func ShallowInlining(yes bool) func(r *Rego) {
+	return func(r *Rego) {
+		r.shallowInlining = yes
+	}
+}
+
+// SkipPartialNamespace disables namespacing of partial evaluation results for support
+// rules generated from policy. Synthetic support rules are still namespaced.
+func SkipPartialNamespace(yes bool) func(r *Rego) {
+	return func(r *Rego) {
+		r.skipPartialNamespace = yes
+	}
+}
+
+// PartialNamespace returns an argument that sets the namespace to use for
+// partial evaluation results. The namespace must be a valid package path
+// component.
+func PartialNamespace(ns string) func(r *Rego) {
+	return func(r *Rego) {
+		r.partialNamespace = ns
+	}
+}
+
+// Module returns an argument that adds a Rego module.
+func Module(filename, input string) func(r *Rego) {
+	return func(r *Rego) {
+		r.modules = append(r.modules, rawModule{
+			filename: filename,
+			module:   input,
+		})
+	}
+}
+
+// ParsedModule returns an argument that adds a parsed Rego module. If a string
+// module with the same filename is added, it will override the parsed
+// module.
+func ParsedModule(module *ast.Module) func(*Rego) {
+	return func(r *Rego) {
+		var filename string
+		if module.Package.Location != nil {
+			filename = module.Package.Location.File
+		} else {
+			filename = fmt.Sprintf("module_%p.rego", module)
+		}
+		r.parsedModules[filename] = module
+	}
+}
+
+// Load returns an argument that adds a filesystem path to load data
+// and Rego modules from. Any file with a *.rego, *.yaml, or *.json
+// extension will be loaded. The path can be either a directory or a file;
+// directories are loaded recursively. The optional ignore string patterns
+// can be used to filter which files are used.
+// The Load option can only be used once.
+// Note: Loading files will require a write transaction on the store.
+func Load(paths []string, filter loader.Filter) func(r *Rego) {
+	return func(r *Rego) {
+		r.loadPaths = loadPaths{paths, filter}
+	}
+}
+
+// LoadBundle returns an argument that adds a filesystem path to load
+// a bundle from. The path can be a compressed bundle file or a directory
+// to be loaded as a bundle.
+// Note: Loading bundles will require a write transaction on the store.
+func LoadBundle(path string) func(r *Rego) {
+	return func(r *Rego) {
+		r.bundlePaths = append(r.bundlePaths, path)
+	}
+}
+
+// ParsedBundle returns an argument that adds a bundle to be loaded.
+func ParsedBundle(name string, b *bundle.Bundle) func(r *Rego) {
+	return func(r *Rego) {
+		r.bundles[name] = b
+	}
+}
+
+// Compiler returns an argument that sets the Rego compiler.
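+//
+// Supplying a compiler is useful when many queries share the same modules,
+// e.g. (hypothetical module source):
+//
+//	compiler := ast.MustCompileModules(map[string]string{
+//		"example.rego": module,
+//	})
+//	r := rego.New(rego.Query("data.example.allow"), rego.Compiler(compiler))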
+func Compiler(c *ast.Compiler) func(r *Rego) { + return func(r *Rego) { + r.compiler = c + } +} + +// Store returns an argument that sets the policy engine's data storage layer. +// +// If using the Load, LoadBundle, or ParsedBundle options then a transaction +// must also be provided via the Transaction() option. After loading files +// or bundles the transaction should be aborted or committed. +func Store(s storage.Store) func(r *Rego) { + return func(r *Rego) { + r.store = s + } +} + +// StoreReadAST returns an argument that sets whether the store should eagerly convert data to AST values. +// +// Only applicable when no store has been set on the Rego object through the Store option. +func StoreReadAST(enabled bool) func(r *Rego) { + return func(r *Rego) { + r.ownStoreReadAst = enabled + } +} + +// Transaction returns an argument that sets the transaction to use for storage +// layer operations. +// +// Requires the store associated with the transaction to be provided via the +// Store() option. If using Load(), LoadBundle(), or ParsedBundle() options +// the transaction will likely require write params. +func Transaction(txn storage.Transaction) func(r *Rego) { + return func(r *Rego) { + r.txn = txn + } +} + +// Metrics returns an argument that sets the metrics collection. +func Metrics(m metrics.Metrics) func(r *Rego) { + return func(r *Rego) { + r.metrics = m + } +} + +// Instrument returns an argument that enables instrumentation for diagnosing +// performance issues. +func Instrument(yes bool) func(r *Rego) { + return func(r *Rego) { + r.instrument = yes + } +} + +// Trace returns an argument that enables tracing on r. +func Trace(yes bool) func(r *Rego) { + return func(r *Rego) { + r.trace = yes + } +} + +// Tracer returns an argument that adds a query tracer to r. +// Deprecated: Use QueryTracer instead. +func Tracer(t topdown.Tracer) func(r *Rego) { + return func(r *Rego) { + if t != nil { + r.queryTracers = append(r.queryTracers, topdown.WrapLegacyTracer(t)) + } + } +} + +// QueryTracer returns an argument that adds a query tracer to r. +func QueryTracer(t topdown.QueryTracer) func(r *Rego) { + return func(r *Rego) { + if t != nil { + r.queryTracers = append(r.queryTracers, t) + } + } +} + +// Runtime returns an argument that sets the runtime data to provide to the +// evaluation engine. +func Runtime(term *ast.Term) func(r *Rego) { + return func(r *Rego) { + r.runtime = term + } +} + +// Time sets the wall clock time to use during policy evaluation. Prepared queries +// do not inherit this parameter. Use EvalTime to set the wall clock time when +// executing a prepared query. +func Time(x time.Time) func(r *Rego) { + return func(r *Rego) { + r.time = x + } +} + +// Seed sets a reader that will seed randomization required by built-in functions. +// If a seed is not provided crypto/rand.Reader is used. +func Seed(r io.Reader) func(*Rego) { + return func(e *Rego) { + e.seed = r + } +} + +// PrintTrace is a helper function to write a human-readable version of the +// trace to the writer w. +func PrintTrace(w io.Writer, r *Rego) { + if r == nil || r.tracebuf == nil { + return + } + topdown.PrettyTrace(w, *r.tracebuf) +} + +// PrintTraceWithLocation is a helper function to write a human-readable version of the +// trace to the writer w. +func PrintTraceWithLocation(w io.Writer, r *Rego) { + if r == nil || r.tracebuf == nil { + return + } + topdown.PrettyTraceWithLocation(w, *r.tracebuf) +} + +// UnsafeBuiltins sets the built-in functions to treat as unsafe and not allow. 
+// This option is ignored for module compilation if the caller supplies the
+// compiler. This option is always honored for query compilation. Provide an
+// empty (non-nil) map to disable checks on queries.
+func UnsafeBuiltins(unsafeBuiltins map[string]struct{}) func(r *Rego) {
+	return func(r *Rego) {
+		r.unsafeBuiltins = unsafeBuiltins
+	}
+}
+
+// SkipBundleVerification skips verification of a signed bundle.
+func SkipBundleVerification(yes bool) func(r *Rego) {
+	return func(r *Rego) {
+		r.skipBundleVerification = yes
+	}
+}
+
+// InterQueryBuiltinCache sets the inter-query cache that built-in functions can utilize
+// during evaluation.
+func InterQueryBuiltinCache(c cache.InterQueryCache) func(r *Rego) {
+	return func(r *Rego) {
+		r.interQueryBuiltinCache = c
+	}
+}
+
+// InterQueryBuiltinValueCache sets the inter-query value cache that built-in functions can utilize
+// during evaluation.
+func InterQueryBuiltinValueCache(c cache.InterQueryValueCache) func(r *Rego) {
+	return func(r *Rego) {
+		r.interQueryBuiltinValueCache = c
+	}
+}
+
+// NDBuiltinCache sets the non-deterministic builtins cache.
+func NDBuiltinCache(c builtins.NDBCache) func(r *Rego) {
+	return func(r *Rego) {
+		r.ndBuiltinCache = c
+	}
+}
+
+// StrictBuiltinErrors tells the evaluator to treat all built-in function errors as fatal errors.
+func StrictBuiltinErrors(yes bool) func(r *Rego) {
+	return func(r *Rego) {
+		r.strictBuiltinErrors = yes
+	}
+}
+
+// BuiltinErrorList supplies an error slice to store built-in function errors.
+func BuiltinErrorList(list *[]topdown.Error) func(r *Rego) {
+	return func(r *Rego) {
+		r.builtinErrorList = list
+	}
+}
+
+// Resolver sets a Resolver for a specified ref path.
+func Resolver(ref ast.Ref, r resolver.Resolver) func(r *Rego) {
+	return func(rego *Rego) {
+		rego.resolvers = append(rego.resolvers, refResolver{ref, r})
+	}
+}
+
+// Schemas sets the schemaSet.
+func Schemas(x *ast.SchemaSet) func(r *Rego) {
+	return func(r *Rego) {
+		r.schemaSet = x
+	}
+}
+
+// Capabilities configures the underlying compiler's capabilities.
+// This option is ignored for module compilation if the caller supplies the
+// compiler.
+func Capabilities(c *ast.Capabilities) func(r *Rego) {
+	return func(r *Rego) {
+		r.capabilities = c
+	}
+}
+
+// Target sets the runtime to exercise.
+func Target(t string) func(r *Rego) {
+	return func(r *Rego) {
+		r.target = t
+	}
+}
+
+// GenerateJSON sets the AST to JSON converter for the results.
+func GenerateJSON(f func(*ast.Term, *EvalContext) (interface{}, error)) func(r *Rego) {
+	return func(r *Rego) {
+		r.generateJSON = f
+	}
+}
+
+// PrintHook sets the object to use for handling print statement outputs.
+func PrintHook(h print.Hook) func(r *Rego) {
+	return func(r *Rego) {
+		r.printHook = h
+	}
+}
+
+// DistributedTracingOpts sets the options to be used by distributed tracing.
+func DistributedTracingOpts(tr tracing.Options) func(r *Rego) {
+	return func(r *Rego) {
+		r.distributedTacingOpts = tr
+	}
+}
+
+// EnablePrintStatements enables print() calls. If this option is not provided,
+// print() calls will be erased from the policy. This option only applies to
+// queries and policies that are passed as raw strings, i.e., this function will
+// not have any effect if the caller supplies the ast.Compiler instance.
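+//
+// Typically paired with a print hook that receives the output, e.g.
+// (hypothetical):
+//
+//	r := rego.New(
+//		rego.Query("data.example.allow"),
+//		rego.EnablePrintStatements(true),
+//		rego.PrintHook(topdown.NewPrintHook(os.Stderr)),
+//	)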
+func EnablePrintStatements(yes bool) func(r *Rego) { + return func(r *Rego) { + r.enablePrintStatements = yes + } +} + +// Strict enables or disables strict-mode in the compiler +func Strict(yes bool) func(r *Rego) { + return func(r *Rego) { + r.strict = yes + } +} + +func SetRegoVersion(version ast.RegoVersion) func(r *Rego) { + return func(r *Rego) { + r.regoVersion = version + } +} + +// New returns a new Rego object. +func New(options ...func(r *Rego)) *Rego { + + r := &Rego{ + parsedModules: map[string]*ast.Module{}, + capture: map[*ast.Expr]ast.Var{}, + compiledQueries: map[queryType]compiledQuery{}, + builtinDecls: map[string]*ast.Builtin{}, + builtinFuncs: map[string]*topdown.Builtin{}, + bundles: map[string]*bundle.Bundle{}, + } + + for _, option := range options { + option(r) + } + + if r.compiler == nil { + r.compiler = ast.NewCompiler(). + WithUnsafeBuiltins(r.unsafeBuiltins). + WithBuiltins(r.builtinDecls). + WithDebug(r.dump). + WithSchemas(r.schemaSet). + WithCapabilities(r.capabilities). + WithEnablePrintStatements(r.enablePrintStatements). + WithStrict(r.strict). + WithUseTypeCheckAnnotations(true) + + // topdown could be target "" or "rego", but both could be overridden by + // a target plugin (checked below) + if r.target == targetWasm { + r.compiler = r.compiler.WithEvalMode(ast.EvalModeIR) + } + + if r.regoVersion != ast.RegoUndefined { + r.compiler = r.compiler.WithDefaultRegoVersion(r.regoVersion) + } + } + + if r.store == nil { + r.store = inmem.NewWithOpts(inmem.OptReturnASTValuesOnRead(r.ownStoreReadAst)) + r.ownStore = true + } else { + r.ownStore = false + } + + if r.metrics == nil { + r.metrics = metrics.New() + } + + if r.instrument { + r.instrumentation = topdown.NewInstrumentation(r.metrics) + r.compiler.WithMetrics(r.metrics) + } + + if r.trace { + r.tracebuf = topdown.NewBufferTracer() + r.queryTracers = append(r.queryTracers, r.tracebuf) + } + + if r.partialNamespace == "" { + r.partialNamespace = defaultPartialNamespace + } + + if r.generateJSON == nil { + r.generateJSON = generateJSON + } + + if r.pluginMgr != nil { + for _, name := range r.pluginMgr.Plugins() { + p := r.pluginMgr.Plugin(name) + if p0, ok := p.(TargetPlugin); ok { + r.plugins = append(r.plugins, p0) + } + } + } + + if t := r.targetPlugin(r.target); t != nil { + r.compiler = r.compiler.WithEvalMode(ast.EvalModeIR) + } + + return r +} + +// Eval evaluates this Rego object and returns a ResultSet. +func (r *Rego) Eval(ctx context.Context) (ResultSet, error) { + var err error + var txnClose transactionCloser + r.txn, txnClose, err = r.getTxn(ctx) + if err != nil { + return nil, err + } + + pq, err := r.PrepareForEval(ctx) + if err != nil { + _ = txnClose(ctx, err) // Ignore error + return nil, err + } + + evalArgs := []EvalOption{ + EvalTransaction(r.txn), + EvalMetrics(r.metrics), + EvalInstrument(r.instrument), + EvalTime(r.time), + EvalInterQueryBuiltinCache(r.interQueryBuiltinCache), + EvalInterQueryBuiltinValueCache(r.interQueryBuiltinValueCache), + EvalSeed(r.seed), + } + + if r.ndBuiltinCache != nil { + evalArgs = append(evalArgs, EvalNDBuiltinCache(r.ndBuiltinCache)) + } + + for _, qt := range r.queryTracers { + evalArgs = append(evalArgs, EvalQueryTracer(qt)) + } + + for i := range r.resolvers { + evalArgs = append(evalArgs, EvalResolver(r.resolvers[i].ref, r.resolvers[i].r)) + } + + rs, err := pq.Eval(ctx, evalArgs...) 
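+	// Close the transaction regardless of the evaluation outcome; an
+	// evaluation error takes precedence over any error from the closer.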
+ txnErr := txnClose(ctx, err) // Always call closer + if err == nil { + err = txnErr + } + return rs, err +} + +// PartialEval has been deprecated and renamed to PartialResult. +func (r *Rego) PartialEval(ctx context.Context) (PartialResult, error) { + return r.PartialResult(ctx) +} + +// PartialResult partially evaluates this Rego object and returns a PartialResult. +func (r *Rego) PartialResult(ctx context.Context) (PartialResult, error) { + var err error + var txnClose transactionCloser + r.txn, txnClose, err = r.getTxn(ctx) + if err != nil { + return PartialResult{}, err + } + + pq, err := r.PrepareForEval(ctx, WithPartialEval()) + txnErr := txnClose(ctx, err) // Always call closer + if err != nil { + return PartialResult{}, err + } + if txnErr != nil { + return PartialResult{}, txnErr + } + + pr := PartialResult{ + compiler: pq.r.compiler, + store: pq.r.store, + body: pq.r.parsedQuery, + builtinDecls: pq.r.builtinDecls, + builtinFuncs: pq.r.builtinFuncs, + } + + return pr, nil +} + +// Partial runs partial evaluation on r and returns the result. +func (r *Rego) Partial(ctx context.Context) (*PartialQueries, error) { + var err error + var txnClose transactionCloser + r.txn, txnClose, err = r.getTxn(ctx) + if err != nil { + return nil, err + } + + pq, err := r.PrepareForPartial(ctx) + if err != nil { + _ = txnClose(ctx, err) // Ignore error + return nil, err + } + + evalArgs := []EvalOption{ + EvalTransaction(r.txn), + EvalMetrics(r.metrics), + EvalInstrument(r.instrument), + EvalInterQueryBuiltinCache(r.interQueryBuiltinCache), + EvalInterQueryBuiltinValueCache(r.interQueryBuiltinValueCache), + } + + if r.ndBuiltinCache != nil { + evalArgs = append(evalArgs, EvalNDBuiltinCache(r.ndBuiltinCache)) + } + + for _, t := range r.queryTracers { + evalArgs = append(evalArgs, EvalQueryTracer(t)) + } + + for i := range r.resolvers { + evalArgs = append(evalArgs, EvalResolver(r.resolvers[i].ref, r.resolvers[i].r)) + } + + pqs, err := pq.Partial(ctx, evalArgs...) + txnErr := txnClose(ctx, err) // Always call closer + if err == nil { + err = txnErr + } + return pqs, err +} + +// CompileOption defines a function to set options on Compile calls. +type CompileOption func(*CompileContext) + +// CompileContext contains options for Compile calls. +type CompileContext struct { + partial bool +} + +// CompilePartial defines an option to control whether partial evaluation is run +// before the query is planned and compiled. +func CompilePartial(yes bool) CompileOption { + return func(cfg *CompileContext) { + cfg.partial = yes + } +} + +// Compile returns a compiled policy query. 
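+// The result contains the planned and compiled executable; as a sketch
+// (hypothetical; error handling elided):
+//
+//	cr, err := r.Compile(ctx, rego.CompilePartial(true))
+//	// ... handle err ...
+//	// cr.Bytes holds the compiled Wasm module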
+func (r *Rego) Compile(ctx context.Context, opts ...CompileOption) (*CompileResult, error) {
+
+	var cfg CompileContext
+
+	for _, opt := range opts {
+		opt(&cfg)
+	}
+
+	var queries []ast.Body
+	modules := make([]*ast.Module, 0, len(r.compiler.Modules))
+
+	if cfg.partial {
+
+		pq, err := r.Partial(ctx)
+		if err != nil {
+			return nil, err
+		}
+		if r.dump != nil {
+			if len(pq.Queries) != 0 {
+				msg := fmt.Sprintf("QUERIES (%d total):", len(pq.Queries))
+				fmt.Fprintln(r.dump, msg)
+				fmt.Fprintln(r.dump, strings.Repeat("-", len(msg)))
+				for i := range pq.Queries {
+					fmt.Fprintln(r.dump, pq.Queries[i])
+				}
+				fmt.Fprintln(r.dump)
+			}
+			if len(pq.Support) != 0 {
+				msg := fmt.Sprintf("SUPPORT (%d total):", len(pq.Support))
+				fmt.Fprintln(r.dump, msg)
+				fmt.Fprintln(r.dump, strings.Repeat("-", len(msg)))
+				for i := range pq.Support {
+					fmt.Fprintln(r.dump, pq.Support[i])
+				}
+				fmt.Fprintln(r.dump)
+			}
+		}
+
+		queries = pq.Queries
+		modules = pq.Support
+
+		for _, module := range r.compiler.Modules {
+			modules = append(modules, module)
+		}
+	} else {
+		var err error
+		// If creating a new transaction it should be closed before calling the
+		// planner to avoid holding open the transaction longer than needed.
+		//
+		// TODO(tsandall): in future, planner could make use of store, in which
+		// case this will need to change.
+		var txnClose transactionCloser
+		r.txn, txnClose, err = r.getTxn(ctx)
+		if err != nil {
+			return nil, err
+		}
+
+		err = r.prepare(ctx, compileQueryType, nil)
+		txnErr := txnClose(ctx, err) // Always call closer
+		if err != nil {
+			return nil, err
+		}
+		if txnErr != nil {
+			return nil, txnErr
+		}
+
+		for _, module := range r.compiler.Modules {
+			modules = append(modules, module)
+		}
+
+		queries = []ast.Body{r.compiledQueries[compileQueryType].query}
+	}
+
+	if tgt := r.targetPlugin(r.target); tgt != nil {
+		return nil, errors.New("unsupported for rego target plugins")
+	}
+
+	return r.compileWasm(modules, queries, compileQueryType) // TODO(sr) control flow is funky here
+}
+
+func (r *Rego) compileWasm(_ []*ast.Module, queries []ast.Body, qType queryType) (*CompileResult, error) {
+	policy, err := r.planQuery(queries, qType)
+	if err != nil {
+		return nil, err
+	}
+
+	m, err := wasm.New().WithPolicy(policy).Compile()
+	if err != nil {
+		return nil, err
+	}
+
+	var out bytes.Buffer
+	if err := encoding.WriteModule(&out, m); err != nil {
+		return nil, err
+	}
+
+	return &CompileResult{
+		Bytes: out.Bytes(),
+	}, nil
+}
+
+// PrepareOption defines a function to set an option to control
+// the behavior of the Prepare call.
+type PrepareOption func(*PrepareConfig)
+
+// PrepareConfig holds settings to control the behavior of the
+// Prepare call.
+type PrepareConfig struct {
+	doPartialEval   bool
+	disableInlining *[]string
+	builtinFuncs    map[string]*topdown.Builtin
+}
+
+// WithPartialEval configures an option for PrepareForEval
+// which will have it perform partial evaluation while preparing
+// the query (similar to rego.Rego#PartialResult)
+func WithPartialEval() PrepareOption {
+	return func(p *PrepareConfig) {
+		p.doPartialEval = true
+	}
+}
+
+// WithNoInline adds a set of paths to exclude from partial evaluation inlining.
+func WithNoInline(paths []string) PrepareOption {
+	return func(p *PrepareConfig) {
+		p.disableInlining = &paths
+	}
+}
+
+// WithBuiltinFuncs carries the rego.Function{1,2,3} per-query function definitions
+// to the target plugins.
+func WithBuiltinFuncs(bis map[string]*topdown.Builtin) PrepareOption {
+	return func(p *PrepareConfig) {
+		if p.builtinFuncs == nil {
+			p.builtinFuncs = maps.Clone(bis)
+		} else {
+			maps.Copy(p.builtinFuncs, bis)
+		}
+	}
+}
+
+// BuiltinFuncs allows retrieving the builtin funcs set via PrepareOption
+// WithBuiltinFuncs.
+func (p *PrepareConfig) BuiltinFuncs() map[string]*topdown.Builtin {
+	return p.builtinFuncs
+}
+
+// PrepareForEval will parse inputs, modules, and query arguments in preparation
+// for evaluating them.
+func (r *Rego) PrepareForEval(ctx context.Context, opts ...PrepareOption) (PreparedEvalQuery, error) {
+	if !r.hasQuery() {
+		return PreparedEvalQuery{}, errors.New("cannot evaluate empty query")
+	}
+
+	pCfg := &PrepareConfig{}
+	for _, o := range opts {
+		o(pCfg)
+	}
+
+	var err error
+	var txnClose transactionCloser
+	r.txn, txnClose, err = r.getTxn(ctx)
+	if err != nil {
+		return PreparedEvalQuery{}, err
+	}
+
+	// If the caller wanted to do partial evaluation as part of preparation
+	// do it now and use the new Rego object.
+	if pCfg.doPartialEval {
+
+		pr, err := r.partialResult(ctx, pCfg)
+		if err != nil {
+			_ = txnClose(ctx, err) // Ignore error
+			return PreparedEvalQuery{}, err
+		}
+
+		// Prepare the new query using the result of partial evaluation
+		pq, err := pr.Rego(Transaction(r.txn)).PrepareForEval(ctx)
+		txnErr := txnClose(ctx, err)
+		if err != nil {
+			return pq, err
+		}
+		return pq, txnErr
+	}
+
+	err = r.prepare(ctx, evalQueryType, []extraStage{
+		{
+			after: "ResolveRefs",
+			stage: ast.QueryCompilerStageDefinition{
+				Name:       "RewriteToCaptureValue",
+				MetricName: "query_compile_stage_rewrite_to_capture_value",
+				Stage:      r.rewriteQueryToCaptureValue,
+			},
+		},
+	})
+	if err != nil {
+		_ = txnClose(ctx, err) // Ignore error
+		return PreparedEvalQuery{}, err
+	}
+
+	switch r.target {
+	case targetWasm: // TODO(sr): make wasm a target plugin, too
+
+		if r.hasWasmModule() {
+			_ = txnClose(ctx, err) // Ignore error
+			return PreparedEvalQuery{}, errors.New("wasm target not supported")
+		}
+
+		var modules []*ast.Module
+		for _, module := range r.compiler.Modules {
+			modules = append(modules, module)
+		}
+
+		queries := []ast.Body{r.compiledQueries[evalQueryType].query}
+
+		e, err := opa.LookupEngine(targetWasm)
+		if err != nil {
+			return PreparedEvalQuery{}, err
+		}
+
+		// nolint: staticcheck // SA4006 false positive
+		cr, err := r.compileWasm(modules, queries, evalQueryType)
+		if err != nil {
+			_ = txnClose(ctx, err) // Ignore error
+			return PreparedEvalQuery{}, err
+		}
+
+		// nolint: staticcheck // SA4006 false positive
+		data, err := r.store.Read(ctx, r.txn, storage.Path{})
+		if err != nil {
+			_ = txnClose(ctx, err) // Ignore error
+			return PreparedEvalQuery{}, err
+		}
+
+		o, err := e.New().WithPolicyBytes(cr.Bytes).WithDataJSON(data).Init()
+		if err != nil {
+			_ = txnClose(ctx, err) // Ignore error
+			return PreparedEvalQuery{}, err
+		}
+		r.opa = o
+
+	case targetRego: // do nothing, don't lookup default plugin
+	default: // either a specific plugin target, or one that is default
+		if tgt := r.targetPlugin(r.target); tgt != nil {
+			queries := []ast.Body{r.compiledQueries[evalQueryType].query}
+			pol, err := r.planQuery(queries, evalQueryType)
+			if err != nil {
+				return PreparedEvalQuery{}, err
+			}
+			// always add the builtins provided via rego.FunctionN options
+			opts = append(opts, WithBuiltinFuncs(r.builtinFuncs))
+			r.targetPrepState, err = tgt.PrepareForEval(ctx, pol, opts...)
+			if err != nil {
+				return PreparedEvalQuery{}, err
+			}
+		}
+	}
+
+	txnErr := txnClose(ctx, err) // Always call closer
+	if txnErr != nil {
+		return PreparedEvalQuery{}, txnErr
+	}
+
+	return PreparedEvalQuery{preparedQuery{r, pCfg}}, err
+}
+
+// PrepareForPartial will parse inputs, modules, and query arguments in preparation
+// for partially evaluating them.
+func (r *Rego) PrepareForPartial(ctx context.Context, opts ...PrepareOption) (PreparedPartialQuery, error) {
+	if !r.hasQuery() {
+		return PreparedPartialQuery{}, errors.New("cannot evaluate empty query")
+	}
+
+	pCfg := &PrepareConfig{}
+	for _, o := range opts {
+		o(pCfg)
+	}
+
+	var err error
+	var txnClose transactionCloser
+	r.txn, txnClose, err = r.getTxn(ctx)
+	if err != nil {
+		return PreparedPartialQuery{}, err
+	}
+
+	err = r.prepare(ctx, partialQueryType, []extraStage{
+		{
+			after: "CheckSafety",
+			stage: ast.QueryCompilerStageDefinition{
+				Name:       "RewriteEquals",
+				MetricName: "query_compile_stage_rewrite_equals",
+				Stage:      r.rewriteEqualsForPartialQueryCompile,
+			},
+		},
+	})
+	txnErr := txnClose(ctx, err) // Always call closer
+	if err != nil {
+		return PreparedPartialQuery{}, err
+	}
+	if txnErr != nil {
+		return PreparedPartialQuery{}, txnErr
+	}
+
+	return PreparedPartialQuery{preparedQuery{r, pCfg}}, err
+}
+
+func (r *Rego) prepare(ctx context.Context, qType queryType, extras []extraStage) error {
+	var err error
+
+	r.parsedInput, err = r.parseInput()
+	if err != nil {
+		return err
+	}
+
+	err = r.loadFiles(ctx, r.txn, r.metrics)
+	if err != nil {
+		return err
+	}
+
+	err = r.loadBundles(ctx, r.txn, r.metrics)
+	if err != nil {
+		return err
+	}
+
+	err = r.parseModules(ctx, r.txn, r.metrics)
+	if err != nil {
+		return err
+	}
+
+	// Compile the modules *before* the query, else functions
+	// defined in the module won't be found...
+	err = r.compileModules(ctx, r.txn, r.metrics)
+	if err != nil {
+		return err
+	}
+
+	imports, err := r.prepareImports()
+	if err != nil {
+		return err
+	}
+
+	queryImports := []*ast.Import{}
+	for _, imp := range imports {
+		path := imp.Path.Value.(ast.Ref)
+		if path.HasPrefix([]*ast.Term{ast.FutureRootDocument}) || path.HasPrefix([]*ast.Term{ast.RegoRootDocument}) {
+			queryImports = append(queryImports, imp)
+		}
+	}
+
+	r.parsedQuery, err = r.parseQuery(queryImports, r.metrics)
+	if err != nil {
+		return err
+	}
+
+	err = r.compileAndCacheQuery(qType, r.parsedQuery, imports, r.metrics, extras)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *Rego) parseModules(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error {
+	if len(r.modules) == 0 {
+		return nil
+	}
+
+	ids, err := r.store.ListPolicies(ctx, txn)
+	if err != nil {
+		return err
+	}
+
+	m.Timer(metrics.RegoModuleParse).Start()
+	defer m.Timer(metrics.RegoModuleParse).Stop()
+	var errs Errors
+
+	// Parse any modules that are saved to the store, but only if
+	// another compile step is going to occur (i.e., we have parsed modules
+	// that need to be compiled).
+ for _, id := range ids { + // if it is already on the compiler we're using + // then don't bother to re-parse it from source + if _, haveMod := r.compiler.Modules[id]; haveMod { + continue + } + + bs, err := r.store.GetPolicy(ctx, txn, id) + if err != nil { + return err + } + + parsed, err := ast.ParseModuleWithOpts(id, string(bs), ast.ParserOptions{RegoVersion: r.regoVersion}) + if err != nil { + errs = append(errs, err) + } + + r.parsedModules[id] = parsed + } + + // Parse any passed in as arguments to the Rego object + for _, module := range r.modules { + p, err := module.ParseWithOpts(ast.ParserOptions{RegoVersion: r.regoVersion}) + if err != nil { + switch errorWithType := err.(type) { + case ast.Errors: + for _, e := range errorWithType { + errs = append(errs, e) + } + default: + errs = append(errs, errorWithType) + } + } + r.parsedModules[module.filename] = p + } + + if len(errs) > 0 { + return errs + } + + return nil +} + +func (r *Rego) loadFiles(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error { + if len(r.loadPaths.paths) == 0 { + return nil + } + + m.Timer(metrics.RegoLoadFiles).Start() + defer m.Timer(metrics.RegoLoadFiles).Stop() + + result, err := loader.NewFileLoader(). + WithMetrics(m). + WithProcessAnnotation(true). + WithRegoVersion(r.regoVersion). + WithCapabilities(r.capabilities). + Filtered(r.loadPaths.paths, r.loadPaths.filter) + if err != nil { + return err + } + for name, mod := range result.Modules { + r.parsedModules[name] = mod.Parsed + } + + if len(result.Documents) > 0 { + err = r.store.Write(ctx, txn, storage.AddOp, storage.Path{}, result.Documents) + if err != nil { + return err + } + } + return nil +} + +func (r *Rego) loadBundles(_ context.Context, _ storage.Transaction, m metrics.Metrics) error { + if len(r.bundlePaths) == 0 { + return nil + } + + m.Timer(metrics.RegoLoadBundles).Start() + defer m.Timer(metrics.RegoLoadBundles).Stop() + + for _, path := range r.bundlePaths { + bndl, err := loader.NewFileLoader(). + WithMetrics(m). + WithProcessAnnotation(true). + WithSkipBundleVerification(r.skipBundleVerification). + WithRegoVersion(r.regoVersion). + WithCapabilities(r.capabilities). + AsBundle(path) + if err != nil { + return fmt.Errorf("loading error: %s", err) + } + r.bundles[path] = bndl + } + return nil +} + +func (r *Rego) parseInput() (ast.Value, error) { + if r.parsedInput != nil { + return r.parsedInput, nil + } + return r.parseRawInput(r.rawInput, r.metrics) +} + +func (*Rego) parseRawInput(rawInput *interface{}, m metrics.Metrics) (ast.Value, error) { + var input ast.Value + + if rawInput == nil { + return input, nil + } + + m.Timer(metrics.RegoInputParse).Start() + defer m.Timer(metrics.RegoInputParse).Stop() + + rawPtr := util.Reference(rawInput) + + // roundtrip through json: this turns slices (e.g. 
[]string, []bool) into
+	// []interface{}, the only array type ast.InterfaceToValue can work with
+	if err := util.RoundTrip(rawPtr); err != nil {
+		return nil, err
+	}
+
+	return ast.InterfaceToValue(*rawPtr)
+}
+
+func (r *Rego) parseQuery(queryImports []*ast.Import, m metrics.Metrics) (ast.Body, error) {
+	if r.parsedQuery != nil {
+		return r.parsedQuery, nil
+	}
+
+	m.Timer(metrics.RegoQueryParse).Start()
+	defer m.Timer(metrics.RegoQueryParse).Stop()
+
+	popts, err := future.ParserOptionsFromFutureImports(queryImports)
+	if err != nil {
+		return nil, err
+	}
+	popts.RegoVersion = r.regoVersion
+	popts, err = parserOptionsFromRegoVersionImport(queryImports, popts)
+	if err != nil {
+		return nil, err
+	}
+	popts.SkipRules = true
+	return ast.ParseBodyWithOpts(r.query, popts)
+}
+
+func parserOptionsFromRegoVersionImport(imports []*ast.Import, popts ast.ParserOptions) (ast.ParserOptions, error) {
+	for _, imp := range imports {
+		path := imp.Path.Value.(ast.Ref)
+		if ast.Compare(path, ast.RegoV1CompatibleRef) == 0 {
+			popts.RegoVersion = ast.RegoV1
+			return popts, nil
+		}
+	}
+	return popts, nil
+}
+
+func (r *Rego) compileModules(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error {
+
+	// Only compile again if there are new modules.
+	if len(r.bundles) > 0 || len(r.parsedModules) > 0 {
+
+		// The bundle.Activate call will activate any bundles passed in
+		// (i.e., compile + handle data store changes), and include any of
+		// the additional modules passed in. If no bundles are provided
+		// it will only compile the passed in modules.
+		// Use this as the single point where everything is compiled,
+		// exactly once.
+		opts := &bundle.ActivateOpts{
+			Ctx:           ctx,
+			Store:         r.store,
+			Txn:           txn,
+			Compiler:      r.compilerForTxn(ctx, r.store, txn),
+			Metrics:       m,
+			Bundles:       r.bundles,
+			ExtraModules:  r.parsedModules,
+			ParserOptions: ast.ParserOptions{RegoVersion: r.regoVersion},
+		}
+		err := bundle.Activate(opts)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Ensure all configured resolvers from the store are loaded. Skip if any were explicitly provided.
+	if len(r.resolvers) == 0 {
+		resolvers, err := bundleUtils.LoadWasmResolversFromStore(ctx, r.store, txn, r.bundles)
+		if err != nil {
+			return err
+		}
+
+		for _, rslvr := range resolvers {
+			for _, ep := range rslvr.Entrypoints() {
+				r.resolvers = append(r.resolvers, refResolver{ep, rslvr})
+			}
+		}
+	}
+	return nil
+}
+
+func (r *Rego) compileAndCacheQuery(qType queryType, query ast.Body, imports []*ast.Import, m metrics.Metrics, extras []extraStage) error {
+	m.Timer(metrics.RegoQueryCompile).Start()
+	defer m.Timer(metrics.RegoQueryCompile).Stop()
+
+	cachedQuery, ok := r.compiledQueries[qType]
+	if ok && cachedQuery.query != nil && cachedQuery.compiler != nil {
+		return nil
+	}
+
+	qc, compiled, err := r.compileQuery(query, imports, m, extras)
+	if err != nil {
+		return err
+	}
+
+	// cache the query for future use
+	r.compiledQueries[qType] = compiledQuery{
+		query:    compiled,
+		compiler: qc,
+	}
+	return nil
+}
+
+func (r *Rego) prepareImports() ([]*ast.Import, error) {
+	imports := r.parsedImports
+
+	if len(r.imports) > 0 {
+		s := make([]string, len(r.imports))
+		for i := range r.imports {
+			s[i] = fmt.Sprintf("import %v", r.imports[i])
+		}
+		parsed, err := ast.ParseImports(strings.Join(s, "\n"))
+		if err != nil {
+			return nil, err
+		}
+		imports = append(imports, parsed...)
+ } + return imports, nil +} + +func (r *Rego) compileQuery(query ast.Body, imports []*ast.Import, _ metrics.Metrics, extras []extraStage) (ast.QueryCompiler, ast.Body, error) { + var pkg *ast.Package + + if r.pkg != "" { + var err error + pkg, err = ast.ParsePackage(fmt.Sprintf("package %v", r.pkg)) + if err != nil { + return nil, nil, err + } + } else { + pkg = r.parsedPackage + } + + qctx := ast.NewQueryContext(). + WithPackage(pkg). + WithImports(imports) + + qc := r.compiler.QueryCompiler(). + WithContext(qctx). + WithUnsafeBuiltins(r.unsafeBuiltins). + WithEnablePrintStatements(r.enablePrintStatements). + WithStrict(false) + + for _, extra := range extras { + qc = qc.WithStageAfter(extra.after, extra.stage) + } + + compiled, err := qc.Compile(query) + + return qc, compiled, err + +} + +func (r *Rego) eval(ctx context.Context, ectx *EvalContext) (ResultSet, error) { + switch { + case r.targetPrepState != nil: // target plugin flow + var val ast.Value + if r.runtime != nil { + val = r.runtime.Value + } + s, err := r.targetPrepState.Eval(ctx, ectx, val) + if err != nil { + return nil, err + } + return r.valueToQueryResult(s, ectx) + case r.target == targetWasm: + return r.evalWasm(ctx, ectx) + case r.target == targetRego: // continue + } + + q := topdown.NewQuery(ectx.compiledQuery.query). + WithQueryCompiler(ectx.compiledQuery.compiler). + WithCompiler(r.compiler). + WithStore(r.store). + WithTransaction(ectx.txn). + WithBuiltins(r.builtinFuncs). + WithMetrics(ectx.metrics). + WithInstrumentation(ectx.instrumentation). + WithRuntime(r.runtime). + WithIndexing(ectx.indexing). + WithEarlyExit(ectx.earlyExit). + WithInterQueryBuiltinCache(ectx.interQueryBuiltinCache). + WithInterQueryBuiltinValueCache(ectx.interQueryBuiltinValueCache). + WithStrictBuiltinErrors(r.strictBuiltinErrors). + WithBuiltinErrorList(r.builtinErrorList). + WithSeed(ectx.seed). + WithPrintHook(ectx.printHook). + WithDistributedTracingOpts(r.distributedTacingOpts). + WithVirtualCache(ectx.virtualCache). + WithBaseCache(ectx.baseCache) + + if !ectx.time.IsZero() { + q = q.WithTime(ectx.time) + } + + if ectx.ndBuiltinCache != nil { + q = q.WithNDBuiltinCache(ectx.ndBuiltinCache) + } + + for i := range ectx.queryTracers { + q = q.WithQueryTracer(ectx.queryTracers[i]) + } + + if ectx.parsedInput != nil { + q = q.WithInput(ast.NewTerm(ectx.parsedInput)) + } + + if ectx.httpRoundTripper != nil { + q = q.WithHTTPRoundTripper(ectx.httpRoundTripper) + } + + for i := range ectx.resolvers { + q = q.WithResolver(ectx.resolvers[i].ref, ectx.resolvers[i].r) + } + + // Cancel query if context is cancelled or deadline is reached. 
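+	// waitForDone (defined below) invokes c.Cancel() once ctx is done; closing
+	// the exit channel on return stops that goroutine as soon as evaluation
+	// finishes first, so it cannot leak.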
+ c := topdown.NewCancel() + q = q.WithCancel(c) + exit := make(chan struct{}) + defer close(exit) + go waitForDone(ctx, exit, func() { + c.Cancel() + }) + + var rs ResultSet + err := q.Iter(ctx, func(qr topdown.QueryResult) error { + result, err := r.generateResult(qr, ectx) + if err != nil { + return err + } + rs = append(rs, result) + return nil + }) + + if err != nil { + return nil, err + } + + if len(rs) == 0 { + return nil, nil + } + + return rs, nil +} + +func (r *Rego) evalWasm(ctx context.Context, ectx *EvalContext) (ResultSet, error) { + input := ectx.rawInput + if ectx.parsedInput != nil { + i := interface{}(ectx.parsedInput) + input = &i + } + result, err := r.opa.Eval(ctx, opa.EvalOpts{ + Metrics: r.metrics, + Input: input, + Time: ectx.time, + Seed: ectx.seed, + InterQueryBuiltinCache: ectx.interQueryBuiltinCache, + NDBuiltinCache: ectx.ndBuiltinCache, + PrintHook: ectx.printHook, + Capabilities: ectx.capabilities, + }) + if err != nil { + return nil, err + } + + parsed, err := ast.ParseTerm(string(result.Result)) + if err != nil { + return nil, err + } + + return r.valueToQueryResult(parsed.Value, ectx) +} + +func (r *Rego) valueToQueryResult(res ast.Value, ectx *EvalContext) (ResultSet, error) { + resultSet, ok := res.(ast.Set) + if !ok { + return nil, errors.New("illegal result type") + } + + if resultSet.Len() == 0 { + return nil, nil + } + + var rs ResultSet + err := resultSet.Iter(func(term *ast.Term) error { + obj, ok := term.Value.(ast.Object) + if !ok { + return errors.New("illegal result type") + } + qr := topdown.QueryResult{} + obj.Foreach(func(k, v *ast.Term) { + kvt := ast.VarTerm(string(k.Value.(ast.String))) + qr[kvt.Value.(ast.Var)] = v + }) + result, err := r.generateResult(qr, ectx) + if err != nil { + return err + } + rs = append(rs, result) + return nil + }) + + return rs, err +} + +func (r *Rego) generateResult(qr topdown.QueryResult, ectx *EvalContext) (Result, error) { + + rewritten := ectx.compiledQuery.compiler.RewrittenVars() + + result := newResult() + for k, term := range qr { + v, err := r.generateJSON(term, ectx) + if err != nil { + return result, err + } + + if rw, ok := rewritten[k]; ok { + k = rw + } + if isTermVar(k) || isTermWasmVar(k) || k.IsGenerated() || k.IsWildcard() { + continue + } + result.Bindings[string(k)] = v + } + + for _, expr := range ectx.compiledQuery.query { + if expr.Generated { + continue + } + + if k, ok := r.capture[expr]; ok { + v, err := r.generateJSON(qr[k], ectx) + if err != nil { + return result, err + } + result.Expressions = append(result.Expressions, newExpressionValue(expr, v)) + } else { + result.Expressions = append(result.Expressions, newExpressionValue(expr, true)) + } + + } + return result, nil +} + +func (r *Rego) partialResult(ctx context.Context, pCfg *PrepareConfig) (PartialResult, error) { + + err := r.prepare(ctx, partialResultQueryType, []extraStage{ + { + after: "ResolveRefs", + stage: ast.QueryCompilerStageDefinition{ + Name: "RewriteForPartialEval", + MetricName: "query_compile_stage_rewrite_for_partial_eval", + Stage: r.rewriteQueryForPartialEval, + }, + }, + }) + if err != nil { + return PartialResult{}, err + } + + ectx := &EvalContext{ + parsedInput: r.parsedInput, + metrics: r.metrics, + txn: r.txn, + partialNamespace: r.partialNamespace, + queryTracers: r.queryTracers, + compiledQuery: r.compiledQueries[partialResultQueryType], + instrumentation: r.instrumentation, + indexing: true, + resolvers: r.resolvers, + capabilities: r.capabilities, + strictBuiltinErrors: r.strictBuiltinErrors, + 
nondeterministicBuiltins: r.nondeterministicBuiltins, + } + + disableInlining := r.disableInlining + + if pCfg.disableInlining != nil { + disableInlining = *pCfg.disableInlining + } + + ectx.disableInlining, err = parseStringsToRefs(disableInlining) + if err != nil { + return PartialResult{}, err + } + + pq, err := r.partial(ctx, ectx) + if err != nil { + return PartialResult{}, err + } + + // Construct module for queries. + id := fmt.Sprintf("__partialresult__%s__", ectx.partialNamespace) + + module, err := ast.ParseModuleWithOpts(id, "package "+ectx.partialNamespace, + ast.ParserOptions{RegoVersion: r.regoVersion}) + if err != nil { + return PartialResult{}, errors.New("bad partial namespace") + } + + module.Rules = make([]*ast.Rule, len(pq.Queries)) + for i, body := range pq.Queries { + rule := &ast.Rule{ + Head: ast.NewHead(ast.Var("__result__"), nil, ast.Wildcard), + Body: body, + Module: module, + } + module.Rules[i] = rule + if checkPartialResultForRecursiveRefs(body, rule.Path()) { + return PartialResult{}, Errors{errPartialEvaluationNotEffective} + } + } + + // Update compiler with partial evaluation output. + r.compiler.Modules[id] = module + for i, module := range pq.Support { + r.compiler.Modules[fmt.Sprintf("__partialsupport__%s__%d__", ectx.partialNamespace, i)] = module + } + + r.metrics.Timer(metrics.RegoModuleCompile).Start() + r.compilerForTxn(ctx, r.store, r.txn).Compile(r.compiler.Modules) + r.metrics.Timer(metrics.RegoModuleCompile).Stop() + + if r.compiler.Failed() { + return PartialResult{}, r.compiler.Errors + } + + result := PartialResult{ + compiler: r.compiler, + store: r.store, + body: ast.MustParseBody(fmt.Sprintf("data.%v.__result__", ectx.partialNamespace)), + builtinDecls: r.builtinDecls, + builtinFuncs: r.builtinFuncs, + } + + return result, nil +} + +func (r *Rego) partial(ctx context.Context, ectx *EvalContext) (*PartialQueries, error) { + + var unknowns []*ast.Term + + switch { + case ectx.parsedUnknowns != nil: + unknowns = ectx.parsedUnknowns + case ectx.unknowns != nil: + unknowns = make([]*ast.Term, len(ectx.unknowns)) + for i := range ectx.unknowns { + var err error + unknowns[i], err = ast.ParseTerm(ectx.unknowns[i]) + if err != nil { + return nil, err + } + } + default: + // Use input document as unknown if caller has not specified any. + unknowns = []*ast.Term{ast.NewTerm(ast.InputRootRef)} + } + + q := topdown.NewQuery(ectx.compiledQuery.query). + WithQueryCompiler(ectx.compiledQuery.compiler). + WithCompiler(r.compiler). + WithStore(r.store). + WithTransaction(ectx.txn). + WithBuiltins(r.builtinFuncs). + WithMetrics(ectx.metrics). + WithInstrumentation(ectx.instrumentation). + WithUnknowns(unknowns). + WithDisableInlining(ectx.disableInlining). + WithNondeterministicBuiltins(ectx.nondeterministicBuiltins). + WithRuntime(r.runtime). + WithIndexing(ectx.indexing). + WithEarlyExit(ectx.earlyExit). + WithPartialNamespace(ectx.partialNamespace). + WithSkipPartialNamespace(r.skipPartialNamespace). + WithShallowInlining(r.shallowInlining). + WithInterQueryBuiltinCache(ectx.interQueryBuiltinCache). + WithInterQueryBuiltinValueCache(ectx.interQueryBuiltinValueCache). + WithStrictBuiltinErrors(ectx.strictBuiltinErrors). + WithSeed(ectx.seed). 
+ WithPrintHook(ectx.printHook) + + if !ectx.time.IsZero() { + q = q.WithTime(ectx.time) + } + + if ectx.ndBuiltinCache != nil { + q = q.WithNDBuiltinCache(ectx.ndBuiltinCache) + } + + for i := range ectx.queryTracers { + q = q.WithQueryTracer(ectx.queryTracers[i]) + } + + if ectx.parsedInput != nil { + q = q.WithInput(ast.NewTerm(ectx.parsedInput)) + } + + for i := range ectx.resolvers { + q = q.WithResolver(ectx.resolvers[i].ref, ectx.resolvers[i].r) + } + + // Cancel query if context is cancelled or deadline is reached. + c := topdown.NewCancel() + q = q.WithCancel(c) + exit := make(chan struct{}) + defer close(exit) + go waitForDone(ctx, exit, func() { + c.Cancel() + }) + + queries, support, err := q.PartialRun(ctx) + if err != nil { + return nil, err + } + + // If the target rego-version is v0, and the rego.v1 import is available, then we attempt to apply it to support modules. + if r.regoVersion == ast.RegoV0 && + (r.capabilities == nil || + r.capabilities.ContainsFeature(ast.FeatureRegoV1Import) || + r.capabilities.ContainsFeature(ast.FeatureRegoV1)) { + + for i, mod := range support { + // We can't apply the RegoV0CompatV1 version to the support module if it contains rules or vars that + // conflict with future keywords. + applyRegoVersion := true + + ast.WalkRules(mod, func(r *ast.Rule) bool { + name := r.Head.Name + if name == "" && len(r.Head.Reference) > 0 { + name = r.Head.Reference[0].Value.(ast.Var) + } + if ast.IsFutureKeywordForRegoVersion(name.String(), ast.RegoV0) { + applyRegoVersion = false + return true + } + return false + }) + + if applyRegoVersion { + ast.WalkVars(mod, func(v ast.Var) bool { + if ast.IsFutureKeywordForRegoVersion(v.String(), ast.RegoV0) { + applyRegoVersion = false + return true + } + return false + }) + } + + if applyRegoVersion { + support[i].SetRegoVersion(ast.RegoV0CompatV1) + } else { + support[i].SetRegoVersion(r.regoVersion) + } + } + } else { + // If the target rego-version is not v0, then we apply the target rego-version to the support modules. + for i := range support { + support[i].SetRegoVersion(r.regoVersion) + } + } + + pq := &PartialQueries{ + Queries: queries, + Support: support, + } + + return pq, nil +} + +func (r *Rego) rewriteQueryToCaptureValue(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) { + + checkCapture := iteration(query) || len(query) > 1 + + for _, expr := range query { + + if expr.Negated { + continue + } + + if expr.IsAssignment() || expr.IsEquality() { + continue + } + + var capture *ast.Term + + // If the expression can be evaluated as a function, rewrite it to + // capture the return value. E.g., neq(1,2) becomes neq(1,2,x) but + // plus(1,2,x) does not get rewritten. 
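+	// The captured variable is what surfaces as the expression's value in the
+	// ResultSet: illustratively, a query such as `count([1, 2, 3])` is evaluated
+	// as `count([1, 2, 3], $term1)` (see generateTermVar below), and the binding
+	// for $term1 is later reported via r.capture in generateResult.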
+ switch terms := expr.Terms.(type) { + case *ast.Term: + capture = r.generateTermVar() + expr.Terms = ast.Equality.Expr(terms, capture).Terms + r.capture[expr] = capture.Value.(ast.Var) + case []*ast.Term: + tpe := r.compiler.TypeEnv.Get(terms[0]) + if !types.Void(tpe) && types.Arity(tpe) == len(terms)-1 { + capture = r.generateTermVar() + expr.Terms = append(terms, capture) + r.capture[expr] = capture.Value.(ast.Var) + } + } + + if capture != nil && checkCapture { + cpy := expr.Copy() + cpy.Terms = capture + cpy.Generated = true + cpy.With = nil + query.Append(cpy) + } + } + + return query, nil +} + +func (*Rego) rewriteQueryForPartialEval(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) { + if len(query) != 1 { + return nil, errors.New("partial evaluation requires single ref (not multiple expressions)") + } + + term, ok := query[0].Terms.(*ast.Term) + if !ok { + return nil, errors.New("partial evaluation requires ref (not expression)") + } + + ref, ok := term.Value.(ast.Ref) + if !ok { + return nil, fmt.Errorf("partial evaluation requires ref (not %v)", ast.ValueName(term.Value)) + } + + if !ref.IsGround() { + return nil, errors.New("partial evaluation requires ground ref") + } + + return ast.NewBody(ast.Equality.Expr(ast.Wildcard, term)), nil +} + +// rewriteEqualsForPartialQueryCompile will rewrite == to = in queries. Normally +// this wouldn't be done, except for handling queries with the `Partial` API +// where rewriting them can substantially simplify the result, and it is unlikely +// that the caller would need expression values. +func (*Rego) rewriteEqualsForPartialQueryCompile(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) { + doubleEq := ast.Equal.Ref() + unifyOp := ast.Equality.Ref() + ast.WalkExprs(query, func(x *ast.Expr) bool { + if x.IsCall() { + operator := x.Operator() + if operator.Equal(doubleEq) && len(x.Operands()) == 2 { + x.SetOperator(ast.NewTerm(unifyOp)) + } + } + return false + }) + return query, nil +} + +func (r *Rego) generateTermVar() *ast.Term { + r.termVarID++ + prefix := ast.WildcardPrefix + if p := r.targetPlugin(r.target); p != nil { + prefix = wasmVarPrefix + } else if r.target == targetWasm { + prefix = wasmVarPrefix + } + return ast.VarTerm(fmt.Sprintf("%sterm%v", prefix, r.termVarID)) +} + +func (r Rego) hasQuery() bool { + return len(r.query) != 0 || len(r.parsedQuery) != 0 +} + +func (r Rego) hasWasmModule() bool { + for _, b := range r.bundles { + if len(b.WasmModules) > 0 { + return true + } + } + return false +} + +type transactionCloser func(ctx context.Context, err error) error + +// getTxn will conditionally create a read or write transaction suitable for +// the configured Rego object. The returned function should be used to close the txn +// regardless of status. +func (r *Rego) getTxn(ctx context.Context) (storage.Transaction, transactionCloser, error) { + + noopCloser := func(_ context.Context, _ error) error { + return nil // no-op default + } + + if r.txn != nil { + // Externally provided txn + return r.txn, noopCloser, nil + } + + // Create a new transaction.. + params := storage.TransactionParams{} + + // Bundles and data paths may require writing data files or manifests to storage + if len(r.bundles) > 0 || len(r.bundlePaths) > 0 || len(r.loadPaths.paths) > 0 { + + // If we were given a store we will *not* write to it, only do that on one + // which was created automatically on behalf of the user. 
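+		// r.ownStore is true only when this Rego object created its own
+		// in-memory store rather than having one supplied via an option.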
+ if !r.ownStore { + return nil, noopCloser, errors.New("unable to start write transaction when store was provided") + } + + params.Write = true + } + + txn, err := r.store.NewTransaction(ctx, params) + if err != nil { + return nil, noopCloser, err + } + + // Setup a closer function that will abort or commit as needed. + closer := func(ctx context.Context, txnErr error) error { + var err error + + if txnErr == nil && params.Write { + err = r.store.Commit(ctx, txn) + } else { + r.store.Abort(ctx, txn) + } + + // Clear the auto created transaction now that it is closed. + r.txn = nil + + return err + } + + return txn, closer, nil +} + +func (r *Rego) compilerForTxn(ctx context.Context, store storage.Store, txn storage.Transaction) *ast.Compiler { + // Update the compiler to have a valid path conflict check + // for the current context and transaction. + return r.compiler.WithPathConflictsCheck(storage.NonEmpty(ctx, store, txn)) +} + +func checkPartialResultForRecursiveRefs(body ast.Body, path ast.Ref) bool { + var stop bool + ast.WalkRefs(body, func(x ast.Ref) bool { + if !stop { + if path.HasPrefix(x) { + stop = true + } + } + return stop + }) + return stop +} + +func isTermVar(v ast.Var) bool { + return strings.HasPrefix(string(v), ast.WildcardPrefix+"term") +} + +func isTermWasmVar(v ast.Var) bool { + return strings.HasPrefix(string(v), wasmVarPrefix+"term") +} + +func waitForDone(ctx context.Context, exit chan struct{}, f func()) { + select { + case <-exit: + return + case <-ctx.Done(): + f() + return + } +} + +type rawModule struct { + filename string + module string +} + +func (m rawModule) Parse() (*ast.Module, error) { + return ast.ParseModule(m.filename, m.module) +} + +func (m rawModule) ParseWithOpts(opts ast.ParserOptions) (*ast.Module, error) { + return ast.ParseModuleWithOpts(m.filename, m.module, opts) +} + +type extraStage struct { + after string + stage ast.QueryCompilerStageDefinition +} + +type refResolver struct { + ref ast.Ref + r resolver.Resolver +} + +func iteration(x interface{}) bool { + + var stopped bool + + vis := ast.NewGenericVisitor(func(x interface{}) bool { + switch x := x.(type) { + case *ast.Term: + if ast.IsComprehension(x.Value) { + return true + } + case ast.Ref: + if !stopped { + if bi := ast.BuiltinMap[x.String()]; bi != nil { + if bi.Relation { + stopped = true + return stopped + } + } + for i := 1; i < len(x); i++ { + if _, ok := x[i].Value.(ast.Var); ok { + stopped = true + return stopped + } + } + } + return stopped + } + return stopped + }) + + vis.Walk(x) + + return stopped +} + +func parseStringsToRefs(s []string) ([]ast.Ref, error) { + + refs := make([]ast.Ref, len(s)) + for i := range refs { + var err error + refs[i], err = ast.ParseRef(s[i]) + if err != nil { + return nil, err + } + } + + return refs, nil +} + +// helper function to finish a built-in function call. If an error occurred, +// wrap the error and return it. Otherwise, invoke the iterator if the result +// was defined. 
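+// A HaltError is additionally wrapped in topdown.Halt so that evaluation
+// stops immediately rather than treating the failure as just another
+// built-in error.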
+func finishFunction(name string, bctx topdown.BuiltinContext, result *ast.Term, err error, iter func(*ast.Term) error) error { + if err != nil { + var e *HaltError + sb := strings.Builder{} + if errors.As(err, &e) { + sb.Grow(len(name) + len(e.Error()) + 2) + sb.WriteString(name) + sb.WriteString(": ") + sb.WriteString(e.Error()) + tdErr := &topdown.Error{ + Code: topdown.BuiltinErr, + Message: sb.String(), + Location: bctx.Location, + } + return topdown.Halt{Err: tdErr.Wrap(e)} + } + sb.Grow(len(name) + len(err.Error()) + 2) + sb.WriteString(name) + sb.WriteString(": ") + sb.WriteString(err.Error()) + tdErr := &topdown.Error{ + Code: topdown.BuiltinErr, + Message: sb.String(), + Location: bctx.Location, + } + return tdErr.Wrap(err) + } + if result == nil { + return nil + } + return iter(result) +} + +// helper function to return an option that sets a custom built-in function. +func newFunction(decl *Function, f topdown.BuiltinFunc) func(*Rego) { + return func(r *Rego) { + r.builtinDecls[decl.Name] = &ast.Builtin{ + Name: decl.Name, + Decl: decl.Decl, + Nondeterministic: decl.Nondeterministic, + } + r.builtinFuncs[decl.Name] = &topdown.Builtin{ + Decl: r.builtinDecls[decl.Name], + Func: f, + } + } +} + +func generateJSON(term *ast.Term, ectx *EvalContext) (interface{}, error) { + return ast.JSONWithOpt(term.Value, + ast.JSONOpt{ + SortSets: ectx.sortSets, + CopyMaps: ectx.copyMaps, + }) +} + +func (r *Rego) planQuery(queries []ast.Body, evalQueryType queryType) (*ir.Policy, error) { + modules := make([]*ast.Module, 0, len(r.compiler.Modules)) + for _, module := range r.compiler.Modules { + modules = append(modules, module) + } + + decls := make(map[string]*ast.Builtin, len(r.builtinDecls)+len(ast.BuiltinMap)) + maps.Copy(decls, ast.BuiltinMap) + maps.Copy(decls, r.builtinDecls) + + const queryName = "eval" // NOTE(tsandall): the query name is arbitrary + + p := planner.New(). + WithQueries([]planner.QuerySet{ + { + Name: queryName, + Queries: queries, + RewrittenVars: r.compiledQueries[evalQueryType].compiler.RewrittenVars(), + }, + }). + WithModules(modules). + WithBuiltinDecls(decls). + WithDebug(r.dump) + + policy, err := p.Plan() + if err != nil { + return nil, err + } + if r.dump != nil { + fmt.Fprintln(r.dump, "PLAN:") + fmt.Fprintln(r.dump, "-----") + err = ir.Pretty(r.dump, policy) + if err != nil { + return nil, err + } + fmt.Fprintln(r.dump) + } + return policy, nil +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/rego/resultset.go b/vendor/github.com/open-policy-agent/opa/v1/rego/resultset.go new file mode 100644 index 00000000000..cc0710426e0 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/rego/resultset.go @@ -0,0 +1,90 @@ +package rego + +import ( + "fmt" + + "github.com/open-policy-agent/opa/v1/ast" +) + +// ResultSet represents a collection of output from Rego evaluation. An empty +// result set represents an undefined query. +type ResultSet []Result + +// Vars represents a collection of variable bindings. The keys are the variable +// names and the values are the binding values. +type Vars map[string]interface{} + +// WithoutWildcards returns a copy of v with wildcard variables removed. +func (v Vars) WithoutWildcards() Vars { + n := Vars{} + for k, v := range v { + if ast.Var(k).IsWildcard() || ast.Var(k).IsGenerated() { + continue + } + n[k] = v + } + return n +} + +// Result defines the output of Rego evaluation. 
+type Result struct {
+	Expressions []*ExpressionValue `json:"expressions"`
+	Bindings    Vars               `json:"bindings,omitempty"`
+}
+
+func newResult() Result {
+	return Result{
+		Bindings: Vars{},
+	}
+}
+
+// Location defines a position in a Rego query or module.
+type Location struct {
+	Row int `json:"row"`
+	Col int `json:"col"`
+}
+
+// ExpressionValue defines the value of an expression in a Rego query.
+type ExpressionValue struct {
+	Value    interface{} `json:"value"`
+	Text     string      `json:"text"`
+	Location *Location   `json:"location"`
+}
+
+func newExpressionValue(expr *ast.Expr, value interface{}) *ExpressionValue {
+	result := &ExpressionValue{
+		Value: value,
+	}
+	if expr.Location != nil {
+		result.Text = string(expr.Location.Text)
+		result.Location = &Location{
+			Row: expr.Location.Row,
+			Col: expr.Location.Col,
+		}
+	}
+	return result
+}
+
+func (ev *ExpressionValue) String() string {
+	return fmt.Sprint(ev.Value)
+}
+
+// Allowed is a helper method that'll return true if all of these conditions hold:
+//   - the result set only has one element
+//   - there is only one expression in the result set's only element
+//   - that expression has the value `true`
+//   - there are no bindings.
+//
+// If bindings are present, this will yield `false`: it would be a pitfall to
+// return `true` for a query like `data.authz.allow = x`, which always has a
+// result set element with value true, but could also have a binding `x: false`.
+func (rs ResultSet) Allowed() bool {
+	if len(rs) == 1 && len(rs[0].Bindings) == 0 {
+		if exprs := rs[0].Expressions; len(exprs) == 1 {
+			if b, ok := exprs[0].Value.(bool); ok {
+				return b
+			}
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/open-policy-agent/opa/resolver/interface.go b/vendor/github.com/open-policy-agent/opa/v1/resolver/interface.go
similarity index 86%
rename from vendor/github.com/open-policy-agent/opa/resolver/interface.go
rename to vendor/github.com/open-policy-agent/opa/v1/resolver/interface.go
index fc02329f579..1f04d21c01b 100644
--- a/vendor/github.com/open-policy-agent/opa/resolver/interface.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/resolver/interface.go
@@ -7,8 +7,8 @@ package resolver
 import (
 	"context"
 
-	"github.com/open-policy-agent/opa/ast"
-	"github.com/open-policy-agent/opa/metrics"
+	"github.com/open-policy-agent/opa/v1/ast"
+	"github.com/open-policy-agent/opa/v1/metrics"
 )
 
 // Resolver defines an external value resolver for OPA evaluations.
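The ResultSet.Allowed helper added above is the usual way to gate boolean
policies under the new v1 import path. A minimal sketch, assuming an
illustrative authz package and inline policy that are not part of this patch:

	package main

	import (
		"context"
		"fmt"

		"github.com/open-policy-agent/opa/v1/rego"
	)

	func main() {
		rs, err := rego.New(
			rego.Query("data.authz.allow"),
			rego.Module("authz.rego", "package authz\nallow if input.user == \"alice\""),
			rego.Input(map[string]interface{}{"user": "alice"}),
		).Eval(context.Background())
		if err != nil {
			panic(err)
		}
		// Allowed is true only for a single true expression with no bindings.
		fmt.Println(rs.Allowed())
	}

Note that Allowed deliberately reports false when bindings are present, per
the pitfall called out in its doc comment.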
diff --git a/vendor/github.com/open-policy-agent/opa/resolver/wasm/wasm.go b/vendor/github.com/open-policy-agent/opa/v1/resolver/wasm/wasm.go similarity index 94% rename from vendor/github.com/open-policy-agent/opa/resolver/wasm/wasm.go rename to vendor/github.com/open-policy-agent/opa/v1/resolver/wasm/wasm.go index 9c13879dc33..c70daa8db64 100644 --- a/vendor/github.com/open-policy-agent/opa/resolver/wasm/wasm.go +++ b/vendor/github.com/open-policy-agent/opa/v1/resolver/wasm/wasm.go @@ -6,12 +6,13 @@ package wasm import ( "context" + "errors" "fmt" "strconv" - "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/rego/opa" - "github.com/open-policy-agent/opa/resolver" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/resolver" ) // New creates a new Resolver instance which is using the Wasm module @@ -144,7 +145,7 @@ func getResult(evalResult *opa.Result) (ast.Value, error) { resultSet, ok := parsed.Value.(ast.Set) if !ok { - return nil, fmt.Errorf("illegal result type") + return nil, errors.New("illegal result type") } if resultSet.Len() == 0 { @@ -152,14 +153,14 @@ func getResult(evalResult *opa.Result) (ast.Value, error) { } if resultSet.Len() > 1 { - return nil, fmt.Errorf("illegal result type") + return nil, errors.New("illegal result type") } var obj ast.Object err = resultSet.Iter(func(term *ast.Term) error { obj, ok = term.Value.(ast.Object) if !ok || obj.Len() != 1 { - return fmt.Errorf("illegal result type") + return errors.New("illegal result type") } return nil }) diff --git a/vendor/github.com/open-policy-agent/opa/schemas/authorizationPolicy.json b/vendor/github.com/open-policy-agent/opa/v1/schemas/authorizationPolicy.json similarity index 100% rename from vendor/github.com/open-policy-agent/opa/schemas/authorizationPolicy.json rename to vendor/github.com/open-policy-agent/opa/v1/schemas/authorizationPolicy.json diff --git a/vendor/github.com/open-policy-agent/opa/schemas/schemas.go b/vendor/github.com/open-policy-agent/opa/v1/schemas/schemas.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/schemas/schemas.go rename to vendor/github.com/open-policy-agent/opa/v1/schemas/schemas.go diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/doc.go b/vendor/github.com/open-policy-agent/opa/v1/storage/doc.go new file mode 100644 index 00000000000..6fa2f86d98d --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/storage/doc.go @@ -0,0 +1,6 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package storage exposes the policy engine's storage layer. +package storage diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/errors.go b/vendor/github.com/open-policy-agent/opa/v1/storage/errors.go new file mode 100644 index 00000000000..a3d1c007370 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/storage/errors.go @@ -0,0 +1,121 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package storage + +import ( + "fmt" +) + +const ( + // InternalErr indicates an unknown, internal error has occurred. + InternalErr = "storage_internal_error" + + // NotFoundErr indicates the path used in the storage operation does not + // locate a document. 
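+	// Callers can detect this case with the IsNotFound helper below.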
+	NotFoundErr = "storage_not_found_error"
+
+	// WriteConflictErr indicates a write on the path encountered a conflicting
+	// value inside the transaction.
+	WriteConflictErr = "storage_write_conflict_error"
+
+	// InvalidPatchErr indicates an invalid patch/write was issued. The patch
+	// was rejected.
+	InvalidPatchErr = "storage_invalid_patch_error"
+
+	// InvalidTransactionErr indicates an invalid operation was performed
+	// inside of the transaction.
+	InvalidTransactionErr = "storage_invalid_txn_error"
+
+	// TriggersNotSupportedErr indicates the caller attempted to register a
+	// trigger against a store that does not support them.
+	TriggersNotSupportedErr = "storage_triggers_not_supported_error"
+
+	// WritesNotSupportedErr indicates the caller attempted to perform a write
+	// against a store that does not support them.
+	WritesNotSupportedErr = "storage_writes_not_supported_error"
+
+	// PolicyNotSupportedErr indicates the caller attempted to perform a policy
+	// management operation against a store that does not support them.
+	PolicyNotSupportedErr = "storage_policy_not_supported_error"
+)
+
+// Error is the error type returned by the storage layer.
+type Error struct {
+	Code    string `json:"code"`
+	Message string `json:"message"`
+}
+
+func (err *Error) Error() string {
+	if err.Message != "" {
+		return fmt.Sprintf("%v: %v", err.Code, err.Message)
+	}
+	return err.Code
+}
+
+// IsNotFound returns true if this error is a NotFoundErr.
+func IsNotFound(err error) bool {
+	if err, ok := err.(*Error); ok {
+		return err.Code == NotFoundErr
+	}
+	return false
+}
+
+// IsWriteConflictError returns true if this error is a WriteConflictErr.
+func IsWriteConflictError(err error) bool {
+	switch err := err.(type) {
+	case *Error:
+		return err.Code == WriteConflictErr
+	}
+	return false
+}
+
+// IsInvalidPatch returns true if this error is an InvalidPatchErr.
+func IsInvalidPatch(err error) bool {
+	switch err := err.(type) {
+	case *Error:
+		return err.Code == InvalidPatchErr
+	}
+	return false
+}
+
+// IsInvalidTransaction returns true if this error is an InvalidTransactionErr.
+func IsInvalidTransaction(err error) bool {
+	switch err := err.(type) {
+	case *Error:
+		return err.Code == InvalidTransactionErr
+	}
+	return false
+}
+
+// IsIndexingNotSupported is a stub for backwards-compatibility.
+//
+// Deprecated: We no longer return IndexingNotSupported errors, so it is
+// unnecessary to check for them.
+func IsIndexingNotSupported(error) bool { return false } + +func writeConflictError(path Path) *Error { + return &Error{ + Code: WriteConflictErr, + Message: path.String(), + } +} + +func triggersNotSupportedError() *Error { + return &Error{ + Code: TriggersNotSupportedErr, + } +} + +func writesNotSupportedError() *Error { + return &Error{ + Code: WritesNotSupportedErr, + } +} + +func policyNotSupportedError() *Error { + return &Error{ + Code: PolicyNotSupportedErr, + } +} diff --git a/vendor/github.com/open-policy-agent/opa/storage/inmem/ast.go b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/ast.go similarity index 94% rename from vendor/github.com/open-policy-agent/opa/storage/inmem/ast.go rename to vendor/github.com/open-policy-agent/opa/v1/storage/inmem/ast.go index 5a8a6743fa4..9f14df0e5b7 100644 --- a/vendor/github.com/open-policy-agent/opa/storage/inmem/ast.go +++ b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/ast.go @@ -8,10 +8,10 @@ import ( "fmt" "strconv" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/storage/internal/errors" - "github.com/open-policy-agent/opa/storage/internal/ptr" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/storage/internal/errors" + "github.com/open-policy-agent/opa/v1/storage/internal/ptr" ) type updateAST struct { @@ -101,8 +101,7 @@ func newUpdateArrayAST(data *ast.Array, op storage.PatchOp, path storage.Path, i return nil, invalidPatchError("%v: invalid patch path", path) } - cpy := data.Copy() - cpy = cpy.Append(ast.NewTerm(value)) + cpy := data.Append(ast.NewTerm(value)) return &updateAST{path[:len(path)-1], false, cpy}, nil } @@ -114,7 +113,7 @@ func newUpdateArrayAST(data *ast.Array, op storage.PatchOp, path storage.Path, i switch op { case storage.AddOp: var results []*ast.Term - for i := 0; i < data.Len(); i++ { + for i := range data.Len() { if i == pos { results = append(results, ast.NewTerm(value)) } @@ -125,7 +124,7 @@ func newUpdateArrayAST(data *ast.Array, op storage.PatchOp, path storage.Path, i case storage.RemoveOp: var results []*ast.Term - for i := 0; i < data.Len(); i++ { + for i := range data.Len() { if i != pos { results = append(results, data.Elem(i)) } @@ -134,7 +133,7 @@ func newUpdateArrayAST(data *ast.Array, op storage.PatchOp, path storage.Path, i default: var results []*ast.Term - for i := 0; i < data.Len(); i++ { + for i := range data.Len() { if i == pos { results = append(results, ast.NewTerm(value)) } else { @@ -296,7 +295,7 @@ func removeInAstArray(arr *ast.Array, path storage.Path) (ast.Value, error) { if len(path) == 1 { var elems []*ast.Term // Note: possibly expensive operation for large data. 
-		for i := 0; i < arr.Len(); i++ {
+		for i := range arr.Len() {
 			if i == idx {
 				continue
 			}
diff --git a/vendor/github.com/open-policy-agent/opa/storage/inmem/inmem.go b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/inmem.go
similarity index 97%
rename from vendor/github.com/open-policy-agent/opa/storage/inmem/inmem.go
rename to vendor/github.com/open-policy-agent/opa/v1/storage/inmem/inmem.go
index 9f5b8ba2587..c70d234d745 100644
--- a/vendor/github.com/open-policy-agent/opa/storage/inmem/inmem.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/inmem.go
@@ -24,10 +24,10 @@ import (
 	"sync"
 	"sync/atomic"
 
-	"github.com/open-policy-agent/opa/ast"
 	"github.com/open-policy-agent/opa/internal/merge"
-	"github.com/open-policy-agent/opa/storage"
-	"github.com/open-policy-agent/opa/util"
+	"github.com/open-policy-agent/opa/v1/ast"
+	"github.com/open-policy-agent/opa/v1/storage"
+	"github.com/open-policy-agent/opa/v1/util"
 )
 
 // New returns an empty in-memory store.
@@ -185,7 +185,9 @@ func (db *store) Truncate(ctx context.Context, txn storage.Transaction, params s
 		}
 	}
 
-	if err != nil && err != io.EOF {
+	// err is known not to be nil at this point: the loop above can only
+	// exit after err has been assigned a non-nil value.
+	if err != io.EOF {
 		return err
 	}
 
@@ -442,7 +444,7 @@ func lookup(path storage.Path, data map[string]interface{}) (interface{}, bool)
 	if len(path) == 0 {
 		return data, true
 	}
-	for i := 0; i < len(path)-1; i++ {
+	for i := range len(path) - 1 {
 		value, ok := data[path[i]]
 		if !ok {
 			return nil, false
diff --git a/vendor/github.com/open-policy-agent/opa/storage/inmem/opts.go b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/opts.go
similarity index 100%
rename from vendor/github.com/open-policy-agent/opa/storage/inmem/opts.go
rename to vendor/github.com/open-policy-agent/opa/v1/storage/inmem/opts.go
diff --git a/vendor/github.com/open-policy-agent/opa/storage/inmem/txn.go b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/txn.go
similarity index 98%
rename from vendor/github.com/open-policy-agent/opa/storage/inmem/txn.go
rename to vendor/github.com/open-policy-agent/opa/v1/storage/inmem/txn.go
index d3252e88221..f8a73039126 100644
--- a/vendor/github.com/open-policy-agent/opa/storage/inmem/txn.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/txn.go
@@ -9,11 +9,11 @@ import (
 	"encoding/json"
 	"strconv"
 
-	"github.com/open-policy-agent/opa/ast"
 	"github.com/open-policy-agent/opa/internal/deepcopy"
-	"github.com/open-policy-agent/opa/storage"
-	"github.com/open-policy-agent/opa/storage/internal/errors"
-	"github.com/open-policy-agent/opa/storage/internal/ptr"
+	"github.com/open-policy-agent/opa/v1/ast"
+	"github.com/open-policy-agent/opa/v1/storage"
+	"github.com/open-policy-agent/opa/v1/storage/internal/errors"
+	"github.com/open-policy-agent/opa/v1/storage/internal/ptr"
 )
 
 // transaction implements the low-level read/write operations on the in-memory
diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/interface.go b/vendor/github.com/open-policy-agent/opa/v1/storage/interface.go
new file mode 100644
index 00000000000..94e02a47bcb
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/storage/interface.go
@@ -0,0 +1,247 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package storage
+
+import (
+	"context"
+
+	"github.com/open-policy-agent/opa/v1/metrics"
+)
+
+// Transaction defines the interface that identifies a consistent snapshot over
+// the policy engine's storage layer.
+type Transaction interface {
+	ID() uint64
+}
+
+// Store defines the interface for the storage layer's backend.
+type Store interface {
+	Trigger
+	Policy
+
+	// NewTransaction is called to create a new transaction in the store.
+	NewTransaction(context.Context, ...TransactionParams) (Transaction, error)
+
+	// Read is called to fetch a document referred to by path.
+	Read(context.Context, Transaction, Path) (interface{}, error)
+
+	// Write is called to modify a document referred to by path.
+	Write(context.Context, Transaction, PatchOp, Path, interface{}) error
+
+	// Commit is called to finish the transaction. If Commit returns an error, the
+	// transaction must be automatically aborted by the Store implementation.
+	Commit(context.Context, Transaction) error
+
+	// Truncate is called to make a copy of the underlying store, write documents in the new store
+	// by creating multiple transactions in the new store as needed and finally swapping
+	// over to the new storage instance. This method must be called within a transaction on the original store.
+	Truncate(context.Context, Transaction, TransactionParams, Iterator) error
+
+	// Abort is called to cancel the transaction.
+	Abort(context.Context, Transaction)
+}
+
+// MakeDirer defines the interface a Store could realize to override the
+// generic MakeDir functionality in storage.MakeDir.
+type MakeDirer interface {
+	MakeDir(context.Context, Transaction, Path) error
+}
+
+// TransactionParams describes a new transaction.
+type TransactionParams struct {
+
+	// BasePaths indicates the top-level paths where write operations will be performed in this transaction.
+	BasePaths []string
+
+	// RootOverwrite is deprecated. Use BasePaths instead.
+	RootOverwrite bool
+
+	// Write indicates if this transaction will perform any write operations.
+	Write bool
+
+	// Context contains key/value pairs passed to triggers.
+	Context *Context
+}
+
+// Context is a simple container for key/value pairs.
+type Context struct {
+	values map[interface{}]interface{}
+}
+
+// NewContext returns a new context object.
+func NewContext() *Context {
+	return &Context{
+		values: map[interface{}]interface{}{},
+	}
+}
+
+// Get returns the key value in the context.
+func (ctx *Context) Get(key interface{}) interface{} {
+	if ctx == nil {
+		return nil
+	}
+	return ctx.values[key]
+}
+
+// Put adds a key/value pair to the context.
+func (ctx *Context) Put(key, value interface{}) {
+	ctx.values[key] = value
+}
+
+var metricsKey = struct{}{}
+
+// WithMetrics allows passing metrics via the Context.
+// It puts the metrics object in the ctx, and returns the same
+// ctx (not a copy) for convenience.
+func (ctx *Context) WithMetrics(m metrics.Metrics) *Context {
+	ctx.values[metricsKey] = m
+	return ctx
+}
+
+// Metrics allows using a Context's metrics. It returns nil if metrics
+// were not attached to the Context.
+func (ctx *Context) Metrics() metrics.Metrics {
+	if m, ok := ctx.values[metricsKey]; ok {
+		if met, ok := m.(metrics.Metrics); ok {
+			return met
+		}
+	}
+	return nil
+}
+
+// WriteParams specifies the TransactionParams for a write transaction.
+var WriteParams = TransactionParams{
+	Write: true,
+}
+
+// PatchOp is the enumeration of supported modifications.
+type PatchOp int
+
+// Patch supports add, remove, and replace operations.
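+// For example, a hypothetical call
+//
+//	storage.WriteOne(ctx, store, storage.ReplaceOp, storage.MustParsePath("/a/b"), 7)
+//
+// replaces the document at /a/b, and fails with a not-found error if nothing
+// has been written there yet.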
+const ( + AddOp PatchOp = iota + RemoveOp = iota + ReplaceOp = iota +) + +// WritesNotSupported provides a default implementation of the write +// interface which may be used if the backend does not support writes. +type WritesNotSupported struct{} + +func (WritesNotSupported) Write(context.Context, Transaction, PatchOp, Path, interface{}) error { + return writesNotSupportedError() +} + +// Policy defines the interface for policy module storage. +type Policy interface { + ListPolicies(context.Context, Transaction) ([]string, error) + GetPolicy(context.Context, Transaction, string) ([]byte, error) + UpsertPolicy(context.Context, Transaction, string, []byte) error + DeletePolicy(context.Context, Transaction, string) error +} + +// PolicyNotSupported provides a default implementation of the policy interface +// which may be used if the backend does not support policy storage. +type PolicyNotSupported struct{} + +// ListPolicies always returns a PolicyNotSupportedErr. +func (PolicyNotSupported) ListPolicies(context.Context, Transaction) ([]string, error) { + return nil, policyNotSupportedError() +} + +// GetPolicy always returns a PolicyNotSupportedErr. +func (PolicyNotSupported) GetPolicy(context.Context, Transaction, string) ([]byte, error) { + return nil, policyNotSupportedError() +} + +// UpsertPolicy always returns a PolicyNotSupportedErr. +func (PolicyNotSupported) UpsertPolicy(context.Context, Transaction, string, []byte) error { + return policyNotSupportedError() +} + +// DeletePolicy always returns a PolicyNotSupportedErr. +func (PolicyNotSupported) DeletePolicy(context.Context, Transaction, string) error { + return policyNotSupportedError() +} + +// PolicyEvent describes a change to a policy. +type PolicyEvent struct { + ID string + Data []byte + Removed bool +} + +// DataEvent describes a change to a base data document. +type DataEvent struct { + Path Path + Data interface{} + Removed bool +} + +// TriggerEvent describes the changes that caused the trigger to be invoked. +type TriggerEvent struct { + Policy []PolicyEvent + Data []DataEvent + Context *Context +} + +// IsZero returns true if the TriggerEvent indicates no changes occurred. This +// function is primarily for test purposes. +func (e TriggerEvent) IsZero() bool { + return !e.PolicyChanged() && !e.DataChanged() +} + +// PolicyChanged returns true if the trigger was caused by a policy change. +func (e TriggerEvent) PolicyChanged() bool { + return len(e.Policy) > 0 +} + +// DataChanged returns true if the trigger was caused by a data change. +func (e TriggerEvent) DataChanged() bool { + return len(e.Data) > 0 +} + +// TriggerConfig contains the trigger registration configuration. +type TriggerConfig struct { + + // OnCommit is invoked when a transaction is successfully committed. The + // callback is invoked with a handle to the write transaction that + // successfully committed before other clients see the changes. + OnCommit func(context.Context, Transaction, TriggerEvent) +} + +// Trigger defines the interface that stores implement to register for change +// notifications when the store is changed. +type Trigger interface { + Register(context.Context, Transaction, TriggerConfig) (TriggerHandle, error) +} + +// TriggersNotSupported provides default implementations of the Trigger +// interface which may be used if the backend does not support triggers. +type TriggersNotSupported struct{} + +// Register always returns an error indicating triggers are not supported. 
+func (TriggersNotSupported) Register(context.Context, Transaction, TriggerConfig) (TriggerHandle, error) { + return nil, triggersNotSupportedError() +} + +// TriggerHandle defines the interface that can be used to unregister triggers that have +// been registered on a Store. +type TriggerHandle interface { + Unregister(context.Context, Transaction) +} + +// Iterator defines the interface that can be used to read files from a directory starting with +// files at the base of the directory, then sub-directories etc. +type Iterator interface { + Next() (*Update, error) +} + +// Update contains information about a file +type Update struct { + Path Path + Value []byte + IsPolicy bool +} diff --git a/vendor/github.com/open-policy-agent/opa/storage/internal/errors/errors.go b/vendor/github.com/open-policy-agent/opa/v1/storage/internal/errors/errors.go similarity index 85% rename from vendor/github.com/open-policy-agent/opa/storage/internal/errors/errors.go rename to vendor/github.com/open-policy-agent/opa/v1/storage/internal/errors/errors.go index 0bba74b9077..778f30d1f49 100644 --- a/vendor/github.com/open-policy-agent/opa/storage/internal/errors/errors.go +++ b/vendor/github.com/open-policy-agent/opa/v1/storage/internal/errors/errors.go @@ -8,7 +8,7 @@ package errors import ( "fmt" - "github.com/open-policy-agent/opa/storage" + "github.com/open-policy-agent/opa/v1/storage" ) const ArrayIndexTypeMsg = "array index must be integer" @@ -20,7 +20,11 @@ func NewNotFoundError(path storage.Path) *storage.Error { } func NewNotFoundErrorWithHint(path storage.Path, hint string) *storage.Error { - return NewNotFoundErrorf("%v: %v", path.String(), hint) + message := path.String() + ": " + hint + return &storage.Error{ + Code: storage.NotFoundErr, + Message: message, + } } func NewNotFoundErrorf(f string, a ...interface{}) *storage.Error { diff --git a/vendor/github.com/open-policy-agent/opa/storage/internal/ptr/ptr.go b/vendor/github.com/open-policy-agent/opa/v1/storage/internal/ptr/ptr.go similarity index 83% rename from vendor/github.com/open-policy-agent/opa/storage/internal/ptr/ptr.go rename to vendor/github.com/open-policy-agent/opa/v1/storage/internal/ptr/ptr.go index 14adbd682ef..902e73546e9 100644 --- a/vendor/github.com/open-policy-agent/opa/storage/internal/ptr/ptr.go +++ b/vendor/github.com/open-policy-agent/opa/v1/storage/internal/ptr/ptr.go @@ -8,9 +8,9 @@ package ptr import ( "strconv" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/storage/internal/errors" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/storage/internal/errors" ) func Ptr(data interface{}, path storage.Path) (interface{}, error) { @@ -43,8 +43,15 @@ func ValuePtr(data ast.Value, path storage.Path) (ast.Value, error) { key := path[i] switch curr := node.(type) { case ast.Object: - keyTerm := ast.StringTerm(key) + // This term is only created for the lookup, which is not.. ideal. + // By using the pool, we can at least avoid allocating the term itself, + // while still having to pay 1 allocation for the value. A better solution + // would be dynamically interned string terms. 
+			keyTerm := ast.TermPtrPool.Get()
+			keyTerm.Value = ast.String(key)
+
+			val := curr.Get(keyTerm)
+			ast.TermPtrPool.Put(keyTerm)
 			if val == nil {
 				return nil, errors.NewNotFoundError(path)
 			}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/path.go b/vendor/github.com/open-policy-agent/opa/v1/storage/path.go
new file mode 100644
index 00000000000..f774d2eeda7
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/storage/path.go
@@ -0,0 +1,162 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package storage
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/open-policy-agent/opa/v1/ast"
+)
+
+// Path refers to a document in storage.
+type Path []string
+
+// ParsePath returns a new path for the given str.
+func ParsePath(str string) (path Path, ok bool) {
+	if len(str) == 0 {
+		return nil, false
+	}
+	if str[0] != '/' {
+		return nil, false
+	}
+	if len(str) == 1 {
+		return Path{}, true
+	}
+	parts := strings.Split(str[1:], "/")
+	return parts, true
+}
+
+// ParsePathEscaped returns a new path for the given escaped str.
+func ParsePathEscaped(str string) (path Path, ok bool) {
+	path, ok = ParsePath(str)
+	if !ok {
+		return
+	}
+	for i := range path {
+		segment, err := url.PathUnescape(path[i])
+		if err == nil {
+			path[i] = segment
+		}
+	}
+	return
+}
+
+// NewPathForRef returns a new path for the given ref.
+func NewPathForRef(ref ast.Ref) (path Path, err error) {
+
+	if len(ref) == 0 {
+		return nil, errors.New("empty reference (indicates error in caller)")
+	}
+
+	if len(ref) == 1 {
+		return Path{}, nil
+	}
+
+	path = make(Path, 0, len(ref)-1)
+
+	for _, term := range ref[1:] {
+		switch v := term.Value.(type) {
+		case ast.String:
+			path = append(path, string(v))
+		case ast.Number:
+			path = append(path, v.String())
+		case ast.Boolean, ast.Null:
+			return nil, &Error{
+				Code:    NotFoundErr,
+				Message: fmt.Sprintf("%v: does not exist", ref),
+			}
+		case *ast.Array, ast.Object, ast.Set:
+			return nil, fmt.Errorf("composites cannot be base document keys: %v", ref)
+		default:
+			return nil, fmt.Errorf("unresolved reference (indicates error in caller): %v", ref)
+		}
+	}
+
+	return path, nil
+}
+
+// Compare performs lexicographical comparison on p and other and returns -1 if p
+// is less than other, 0 if p is equal to other, or 1 if p is greater than
+// other.
+func (p Path) Compare(other Path) (cmp int) {
+	for i := range min(len(p), len(other)) {
+		if cmp := strings.Compare(p[i], other[i]); cmp != 0 {
+			return cmp
+		}
+	}
+	if len(p) < len(other) {
+		return -1
+	}
+	if len(p) == len(other) {
+		return 0
+	}
+	return 1
+}
+
+// Equal returns true if p is the same as other.
+func (p Path) Equal(other Path) bool {
+	return p.Compare(other) == 0
+}
+
+// HasPrefix returns true if p starts with other.
+func (p Path) HasPrefix(other Path) bool {
+	if len(other) > len(p) {
+		return false
+	}
+	for i := range other {
+		if p[i] != other[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// Ref returns a ref that represents p rooted at head.
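+// For example, Path{"a", "0"}.Ref(ast.DefaultRootDocument) yields data.a[0]:
+// segments that parse as integers become number terms, everything else
+// becomes a string term.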
+func (p Path) Ref(head *ast.Term) (ref ast.Ref) { + ref = make(ast.Ref, len(p)+1) + ref[0] = head + for i := range p { + idx, err := strconv.ParseInt(p[i], 10, 64) + if err == nil { + ref[i+1] = ast.UIntNumberTerm(uint64(idx)) + } else { + ref[i+1] = ast.StringTerm(p[i]) + } + } + return ref +} + +func (p Path) String() string { + if len(p) == 0 { + return "/" + } + + l := 0 + for i := range p { + l += len(p[i]) + 1 + } + + sb := strings.Builder{} + sb.Grow(l) + for i := range p { + sb.WriteByte('/') + sb.WriteString(url.PathEscape(p[i])) + } + return sb.String() +} + +// MustParsePath returns a new Path for s. If s cannot be parsed, this function +// will panic. This is mostly for test purposes. +func MustParsePath(s string) Path { + path, ok := ParsePath(s) + if !ok { + panic(s) + } + return path +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/storage.go b/vendor/github.com/open-policy-agent/opa/v1/storage/storage.go new file mode 100644 index 00000000000..34305f2912e --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/storage/storage.go @@ -0,0 +1,136 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package storage + +import ( + "context" + + "github.com/open-policy-agent/opa/v1/ast" +) + +// NewTransactionOrDie is a helper function to create a new transaction. If the +// storage layer cannot create a new transaction, this function will panic. This +// function should only be used for tests. +func NewTransactionOrDie(ctx context.Context, store Store, params ...TransactionParams) Transaction { + txn, err := store.NewTransaction(ctx, params...) + if err != nil { + panic(err) + } + return txn +} + +// ReadOne is a convenience function to read a single value from the provided Store. It +// will create a new Transaction to perform the read with, and clean up after itself +// should an error occur. +func ReadOne(ctx context.Context, store Store, path Path) (interface{}, error) { + txn, err := store.NewTransaction(ctx) + if err != nil { + return nil, err + } + defer store.Abort(ctx, txn) + + return store.Read(ctx, txn, path) +} + +// WriteOne is a convenience function to write a single value to the provided Store. It +// will create a new Transaction to perform the write with, and clean up after itself +// should an error occur. +func WriteOne(ctx context.Context, store Store, op PatchOp, path Path, value interface{}) error { + txn, err := store.NewTransaction(ctx, WriteParams) + if err != nil { + return err + } + + if err := store.Write(ctx, txn, op, path, value); err != nil { + store.Abort(ctx, txn) + return err + } + + return store.Commit(ctx, txn) +} + +// MakeDir inserts an empty object at path. If the parent path does not exist, +// MakeDir will create it recursively. +func MakeDir(ctx context.Context, store Store, txn Transaction, path Path) error { + + // Allow the Store implementation to deal with this in its own way. 
+ if md, ok := store.(MakeDirer); ok { + return md.MakeDir(ctx, txn, path) + } + + if len(path) == 0 { + return nil + } + + node, err := store.Read(ctx, txn, path) + if err != nil { + if !IsNotFound(err) { + return err + } + + if err := MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil { + return err + } + + return store.Write(ctx, txn, AddOp, path, map[string]interface{}{}) + } + + if _, ok := node.(map[string]interface{}); ok { + return nil + } + + if _, ok := node.(ast.Object); ok { + return nil + } + + return writeConflictError(path) +} + +// Txn is a convenience function that executes f inside a new transaction +// opened on the store. If the function returns an error, the transaction is +// aborted and the error is returned. Otherwise, the transaction is committed +// and the result of the commit is returned. +func Txn(ctx context.Context, store Store, params TransactionParams, f func(Transaction) error) error { + + txn, err := store.NewTransaction(ctx, params) + if err != nil { + return err + } + + if err := f(txn); err != nil { + store.Abort(ctx, txn) + return err + } + + return store.Commit(ctx, txn) +} + +// NonEmpty returns a function that tests if a path is non-empty. A +// path is non-empty if a Read on the path returns a value or a Read +// on any of the path prefixes returns a non-object value. +func NonEmpty(ctx context.Context, store Store, txn Transaction) func([]string) (bool, error) { + return func(path []string) (bool, error) { + if _, err := store.Read(ctx, txn, Path(path)); err == nil { + return true, nil + } else if !IsNotFound(err) { + return false, err + } + for i := len(path) - 1; i > 0; i-- { + val, err := store.Read(ctx, txn, Path(path[:i])) + if err != nil && !IsNotFound(err) { + return false, err + } else if err == nil { + if _, ok := val.(map[string]interface{}); ok { + return false, nil + } + if _, ok := val.(ast.Object); ok { + return false, nil + } + return true, nil + } + } + return false, nil + } +} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/aggregates.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/aggregates.go similarity index 80% rename from vendor/github.com/open-policy-agent/opa/topdown/aggregates.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/aggregates.go index a0f67a7c956..fb59fd07f05 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/aggregates.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/aggregates.go @@ -7,20 +7,20 @@ package topdown import ( "math/big" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) func builtinCount(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { switch a := operands[0].Value.(type) { case *ast.Array: - return iter(ast.IntNumberTerm(a.Len())) + return iter(ast.InternedIntNumberTerm(a.Len())) case ast.Object: - return iter(ast.IntNumberTerm(a.Len())) + return iter(ast.InternedIntNumberTerm(a.Len())) case ast.Set: - return iter(ast.IntNumberTerm(a.Len())) + return iter(ast.InternedIntNumberTerm(a.Len())) case ast.String: - return iter(ast.IntNumberTerm(len([]rune(a)))) + return iter(ast.InternedIntNumberTerm(len([]rune(a)))) } return builtins.NewOperandTypeErr(1, operands[0].Value, "array", "object", "set", "string") } @@ -99,7 +99,7 @@ func builtinMax(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err if a.Len() == 0 { return nil } - var max = 
ast.Value(ast.Null{}) + max := ast.InternedNullTerm.Value a.Foreach(func(x *ast.Term) { if ast.Compare(max, x.Value) <= 0 { max = x.Value @@ -110,7 +110,7 @@ func builtinMax(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err if a.Len() == 0 { return nil } - max, err := a.Reduce(ast.NullTerm(), func(max *ast.Term, elem *ast.Term) (*ast.Term, error) { + max, err := a.Reduce(ast.InternedNullTerm, func(max *ast.Term, elem *ast.Term) (*ast.Term, error) { if ast.Compare(max, elem) <= 0 { return elem, nil } @@ -142,11 +142,11 @@ func builtinMin(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err if a.Len() == 0 { return nil } - min, err := a.Reduce(ast.NullTerm(), func(min *ast.Term, elem *ast.Term) (*ast.Term, error) { + min, err := a.Reduce(ast.InternedNullTerm, func(min *ast.Term, elem *ast.Term) (*ast.Term, error) { // The null term is considered to be less than any other term, // so in order for min of a set to make sense, we need to check // for it. - if min.Value.Compare(ast.Null{}) == 0 { + if min.Value.Compare(ast.InternedNullTerm.Value) == 0 { return elem, nil } @@ -178,7 +178,7 @@ func builtinAll(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err switch val := operands[0].Value.(type) { case ast.Set: res := true - match := ast.BooleanTerm(true) + match := ast.InternedBooleanTerm(true) val.Until(func(term *ast.Term) bool { if !match.Equal(term) { res = false @@ -186,10 +186,10 @@ func builtinAll(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err } return false }) - return iter(ast.BooleanTerm(res)) + return iter(ast.InternedBooleanTerm(res)) case *ast.Array: res := true - match := ast.BooleanTerm(true) + match := ast.InternedBooleanTerm(true) val.Until(func(term *ast.Term) bool { if !match.Equal(term) { res = false @@ -197,7 +197,7 @@ func builtinAll(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err } return false }) - return iter(ast.BooleanTerm(res)) + return iter(ast.InternedBooleanTerm(res)) default: return builtins.NewOperandTypeErr(1, operands[0].Value, "array", "set") } @@ -206,11 +206,11 @@ func builtinAll(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err func builtinAny(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { switch val := operands[0].Value.(type) { case ast.Set: - res := val.Len() > 0 && val.Contains(ast.BooleanTerm(true)) - return iter(ast.BooleanTerm(res)) + res := val.Len() > 0 && val.Contains(ast.InternedBooleanTerm(true)) + return iter(ast.InternedBooleanTerm(res)) case *ast.Array: res := false - match := ast.BooleanTerm(true) + match := ast.InternedBooleanTerm(true) val.Until(func(term *ast.Term) bool { if match.Equal(term) { res = true @@ -218,7 +218,7 @@ func builtinAny(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err } return false }) - return iter(ast.BooleanTerm(res)) + return iter(ast.InternedBooleanTerm(res)) default: return builtins.NewOperandTypeErr(1, operands[0].Value, "array", "set") } @@ -228,27 +228,20 @@ func builtinMember(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) containee := operands[0] switch c := operands[1].Value.(type) { case ast.Set: - return iter(ast.BooleanTerm(c.Contains(containee))) + return iter(ast.InternedBooleanTerm(c.Contains(containee))) case *ast.Array: - ret := false - c.Until(func(v *ast.Term) bool { - if v.Value.Compare(containee.Value) == 0 { - ret = true + for i := range c.Len() { + if c.Elem(i).Value.Compare(containee.Value) == 0 { + return iter(ast.InternedBooleanTerm(true)) } - 
return ret - }) - return iter(ast.BooleanTerm(ret)) + } + return iter(ast.InternedBooleanTerm(false)) case ast.Object: - ret := false - c.Until(func(_, v *ast.Term) bool { - if v.Value.Compare(containee.Value) == 0 { - ret = true - } - return ret - }) - return iter(ast.BooleanTerm(ret)) + return iter(ast.InternedBooleanTerm(c.Until(func(_, v *ast.Term) bool { + return v.Value.Compare(containee.Value) == 0 + }))) } - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } func builtinMemberWithKey(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -259,9 +252,9 @@ func builtinMemberWithKey(_ BuiltinContext, operands []*ast.Term, iter func(*ast if act := c.Get(key); act != nil { ret = act.Value.Compare(val.Value) == 0 } - return iter(ast.BooleanTerm(ret)) + return iter(ast.InternedBooleanTerm(ret)) } - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } func init() { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/arithmetic.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/arithmetic.go similarity index 92% rename from vendor/github.com/open-policy-agent/opa/topdown/arithmetic.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/arithmetic.go index 3ac703efa3c..acfbba3c745 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/arithmetic.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/arithmetic.go @@ -5,11 +5,11 @@ package topdown import ( - "fmt" + "errors" "math/big" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) type arithArity1 func(a *big.Float) (*big.Float, error) @@ -67,7 +67,7 @@ func builtinPlus(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) er y, ok2 := n2.Int() if ok1 && ok2 && inSmallIntRange(x) && inSmallIntRange(y) { - return iter(ast.IntNumberTerm(x + y)) + return iter(ast.InternedIntNumberTerm(x + y)) } f, err := arithPlus(builtins.NumberToFloat(n1), builtins.NumberToFloat(n2)) @@ -91,7 +91,7 @@ func builtinMultiply(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term y, ok2 := n2.Int() if ok1 && ok2 && inSmallIntRange(x) && inSmallIntRange(y) { - return iter(ast.IntNumberTerm(x * y)) + return iter(ast.InternedIntNumberTerm(x * y)) } f, err := arithMultiply(builtins.NumberToFloat(n1), builtins.NumberToFloat(n2)) @@ -116,14 +116,14 @@ func arithMultiply(a, b *big.Float) (*big.Float, error) { func arithDivide(a, b *big.Float) (*big.Float, error) { i, acc := b.Int64() if acc == big.Exact && i == 0 { - return nil, fmt.Errorf("divide by zero") + return nil, errors.New("divide by zero") } return new(big.Float).Quo(a, b), nil } func arithRem(a, b *big.Int) (*big.Int, error) { if b.Int64() == 0 { - return nil, fmt.Errorf("modulo by zero") + return nil, errors.New("modulo by zero") } return new(big.Int).Rem(a, b), nil } @@ -171,7 +171,7 @@ func builtinMinus(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) e y, oky := n2.Int() if okx && oky && inSmallIntRange(x) && inSmallIntRange(y) { - return iter(ast.IntNumberTerm(x - y)) + return iter(ast.InternedIntNumberTerm(x - y)) } f, err := arithMinus(builtins.NumberToFloat(n1), builtins.NumberToFloat(n2)) @@ -210,17 +210,17 @@ func builtinRem(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err if okx && oky && inSmallIntRange(x) && inSmallIntRange(y) { if y == 0 { - return fmt.Errorf("modulo by zero") + return 
errors.New("modulo by zero") } - return iter(ast.IntNumberTerm(x % y)) + return iter(ast.InternedIntNumberTerm(x % y)) } op1, err1 := builtins.NumberToInt(n1) op2, err2 := builtins.NumberToInt(n2) if err1 != nil || err2 != nil { - return fmt.Errorf("modulo on floating-point number") + return errors.New("modulo on floating-point number") } i, err := arithRem(op1, op2) diff --git a/vendor/github.com/open-policy-agent/opa/topdown/array.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/array.go similarity index 86% rename from vendor/github.com/open-policy-agent/opa/topdown/array.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/array.go index e7fe5be6435..526e3ed26de 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/array.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/array.go @@ -5,8 +5,8 @@ package topdown import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) func builtinArrayConcat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -20,6 +20,13 @@ func builtinArrayConcat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.T return err } + if arrA.Len() == 0 { + return iter(operands[1]) + } + if arrB.Len() == 0 { + return iter(operands[0]) + } + arrC := make([]*ast.Term, arrA.Len()+arrB.Len()) i := 0 @@ -33,7 +40,7 @@ func builtinArrayConcat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.T i++ }) - return iter(ast.NewTerm(ast.NewArray(arrC...))) + return iter(ast.ArrayTerm(arrC...)) } func builtinArraySlice(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -68,6 +75,10 @@ func builtinArraySlice(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Te startIndex = stopIndex } + if startIndex == 0 && stopIndex >= arr.Len() { + return iter(operands[0]) + } + return iter(ast.NewTerm(arr.Slice(startIndex, stopIndex))) } @@ -80,7 +91,7 @@ func builtinArrayReverse(_ BuiltinContext, operands []*ast.Term, iter func(*ast. 
length := arr.Len() reversedArr := make([]*ast.Term, length) - for index := 0; index < length; index++ { + for index := range length { reversedArr[index] = arr.Elem(length - index - 1) } diff --git a/vendor/github.com/open-policy-agent/opa/topdown/binary.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/binary.go similarity index 90% rename from vendor/github.com/open-policy-agent/opa/topdown/binary.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/binary.go index b4f9dbd3926..6f7ebaf40c2 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/binary.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/binary.go @@ -5,8 +5,8 @@ package topdown import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) func builtinBinaryAnd(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/bindings.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/bindings.go similarity index 95% rename from vendor/github.com/open-policy-agent/opa/topdown/bindings.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/bindings.go index 30a8ac5ec4d..8c7bfbd1787 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/bindings.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/bindings.go @@ -6,9 +6,10 @@ package topdown import ( "fmt" + "strconv" "strings" - "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/v1/ast" ) type undo struct { @@ -68,7 +69,7 @@ func (u *bindings) Plug(a *ast.Term) *ast.Term { } func (u *bindings) PlugNamespaced(a *ast.Term, caller *bindings) *ast.Term { - if u != nil { + if u != nil && u.instr != nil { u.instr.startTimer(evalOpPlug) t := u.plugNamespaced(a, caller) u.instr.stopTimer(evalOpPlug) @@ -184,7 +185,7 @@ func (u *bindings) namespaceVar(v *ast.Term, caller *bindings) *ast.Term { // Root documents (i.e., data, input) should never be namespaced because they // are globally unique. 
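// A standalone sketch of the micro-optimization in namespaceVar above:
// strconv.FormatUint converts the integer directly, while fmt.Sprint routes
// through reflection-based formatting and boxes the uint64 into an interface,
// costing an extra allocation on a hot path. Same output either way.
package main

import (
	"fmt"
	"strconv"
)

func main() {
	var id uint64 = 7
	slow := "x" + fmt.Sprint(id)             // boxes id into an interface{}
	fast := "x" + strconv.FormatUint(id, 10) // direct integer-to-string
	fmt.Println(slow == fast)                // true: same string, cheaper path
}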
if !ast.RootDocumentNames.Contains(v) { - return ast.NewTerm(ast.Var(string(name) + fmt.Sprint(u.id))) + return ast.NewTerm(ast.Var(string(name) + strconv.FormatUint(u.id, 10))) } } return v @@ -313,12 +314,12 @@ func (b *bindingsArrayHashmap) Put(key *ast.Term, value value) { if b.a == nil { b.a = new([maxLinearScan]bindingArrayKeyValue) } else if i := b.find(key); i >= 0 { - (*b.a)[i].value = value + b.a[i].value = value return } if b.n < maxLinearScan { - (*b.a)[b.n] = bindingArrayKeyValue{key, value} + b.a[b.n] = bindingArrayKeyValue{key, value} b.n++ return } @@ -341,7 +342,7 @@ func (b *bindingsArrayHashmap) Put(key *ast.Term, value value) { func (b *bindingsArrayHashmap) Get(key *ast.Term) (value, bool) { if b.m == nil { if i := b.find(key); i >= 0 { - return (*b.a)[i].value, true + return b.a[i].value, true } return value{}, false @@ -360,7 +361,7 @@ func (b *bindingsArrayHashmap) Delete(key *ast.Term) { if i := b.find(key); i >= 0 { n := b.n - 1 if i < n { - (*b.a)[i] = (*b.a)[n] + b.a[i] = b.a[n] } b.n = n @@ -373,8 +374,8 @@ func (b *bindingsArrayHashmap) Delete(key *ast.Term) { func (b *bindingsArrayHashmap) Iter(f func(k *ast.Term, v value) bool) { if b.m == nil { - for i := 0; i < b.n; i++ { - if f((*b.a)[i].key, (*b.a)[i].value) { + for i := range b.n { + if f(b.a[i].key, b.a[i].value) { return } } @@ -390,8 +391,8 @@ func (b *bindingsArrayHashmap) Iter(f func(k *ast.Term, v value) bool) { func (b *bindingsArrayHashmap) find(key *ast.Term) int { v := key.Value.(ast.Var) - for i := 0; i < b.n; i++ { - if (*b.a)[i].key.Value.(ast.Var) == v { + for i := range b.n { + if b.a[i].key.Value.(ast.Var) == v { return i } } diff --git a/vendor/github.com/open-policy-agent/opa/topdown/bits.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/bits.go similarity index 96% rename from vendor/github.com/open-policy-agent/opa/topdown/bits.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/bits.go index 7a63c0df1eb..e420ffe611a 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/bits.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/bits.go @@ -7,8 +7,8 @@ package topdown import ( "math/big" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) type bitsArity1 func(a *big.Int) (*big.Int, error) diff --git a/vendor/github.com/open-policy-agent/opa/topdown/builtins.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/builtins.go similarity index 95% rename from vendor/github.com/open-policy-agent/opa/topdown/builtins.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/builtins.go index cf694d4331b..e0b893d477c 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/builtins.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/builtins.go @@ -11,12 +11,12 @@ import ( "io" "math/rand" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/topdown/builtins" - "github.com/open-policy-agent/opa/topdown/cache" - "github.com/open-policy-agent/opa/topdown/print" - "github.com/open-policy-agent/opa/tracing" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/cache" + "github.com/open-policy-agent/opa/v1/topdown/print" + "github.com/open-policy-agent/opa/v1/tracing" ) type ( @@ -52,6 +52,7 @@ 
type ( QueryID uint64 // identifies query being evaluated ParentID uint64 // identifies parent of query being evaluated PrintHook print.Hook // provides callback function to use for printing + RoundTripper CustomizeRoundTripper // customize transport to use for HTTP requests DistributedTracingOpts tracing.Options // options to be used by distributed tracing. rand *rand.Rand // randomization source for non-security-sensitive operations Capabilities *ast.Capabilities diff --git a/vendor/github.com/open-policy-agent/opa/topdown/builtins/builtins.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/builtins/builtins.go similarity index 94% rename from vendor/github.com/open-policy-agent/opa/topdown/builtins/builtins.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/builtins/builtins.go index 353f9568405..9fcaea4a239 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/builtins/builtins.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/builtins/builtins.go @@ -7,12 +7,13 @@ package builtins import ( "encoding/json" + "errors" "fmt" "math/big" "strings" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/util" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/util" ) // Cache defines the built-in cache used by the top-down evaluation. The keys @@ -97,7 +98,7 @@ func (c *NDBCache) UnmarshalJSON(data []byte) error { out[string(k.Value.(ast.String))] = obj return nil } - return fmt.Errorf("expected Object, got other Value type in conversion") + return errors.New("expected Object, got other Value type in conversion") }) if err != nil { return err @@ -128,23 +129,23 @@ func NewOperandErr(pos int, f string, a ...interface{}) error { func NewOperandTypeErr(pos int, got ast.Value, expected ...string) error { if len(expected) == 1 { - return NewOperandErr(pos, "must be %v but got %v", expected[0], ast.TypeName(got)) + return NewOperandErr(pos, "must be %v but got %v", expected[0], ast.ValueName(got)) } - return NewOperandErr(pos, "must be one of {%v} but got %v", strings.Join(expected, ", "), ast.TypeName(got)) + return NewOperandErr(pos, "must be one of {%v} but got %v", strings.Join(expected, ", "), ast.ValueName(got)) } // NewOperandElementErr returns an operand error indicating an element in the // composite operand was wrong. func NewOperandElementErr(pos int, composite ast.Value, got ast.Value, expected ...string) error { - tpe := ast.TypeName(composite) + tpe := ast.ValueName(composite) if len(expected) == 1 { - return NewOperandErr(pos, "must be %v of %vs but got %v containing %v", tpe, expected[0], tpe, ast.TypeName(got)) + return NewOperandErr(pos, "must be %v of %vs but got %v containing %v", tpe, expected[0], tpe, ast.ValueName(got)) } - return NewOperandErr(pos, "must be %v of (any of) {%v} but got %v containing %v", tpe, strings.Join(expected, ", "), tpe, ast.TypeName(got)) + return NewOperandErr(pos, "must be %v of (any of) {%v} but got %v containing %v", tpe, strings.Join(expected, ", "), tpe, ast.ValueName(got)) } // NewOperandEnumErr returns an operand error indicating a value was wrong. 
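// A hedged sketch of how the operand helpers above are used from a built-in;
// builtinExampleUpper is hypothetical, not an OPA built-in. StringOperand
// validates operand 1 and, on mismatch, produces an error via
// NewOperandTypeErr, which after this change reports ast.ValueName(got)
// rather than ast.TypeName(got). BuiltinContext is the topdown package's
// context type seen throughout the hunks above.
//
//	import (
//		"strings"
//		"github.com/open-policy-agent/opa/v1/ast"
//		"github.com/open-policy-agent/opa/v1/topdown/builtins"
//	)
func builtinExampleUpper(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
	s, err := builtins.StringOperand(operands[0].Value, 1)
	if err != nil {
		return err // e.g. "operand 1 must be string but got number"
	}
	return iter(ast.StringTerm(strings.ToUpper(string(s))))
}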
@@ -233,7 +234,7 @@ func ObjectOperand(x ast.Value, pos int) (ast.Object, error) { func ArrayOperand(x ast.Value, pos int) (*ast.Array, error) { a, ok := x.(*ast.Array) if !ok { - return ast.NewArray(), NewOperandTypeErr(pos, x, "array") + return nil, NewOperandTypeErr(pos, x, "array") } return a, nil } @@ -262,7 +263,7 @@ func NumberToInt(n ast.Number) (*big.Int, error) { f := NumberToFloat(n) r, accuracy := f.Int(nil) if accuracy != big.Exact { - return nil, fmt.Errorf("illegal value") + return nil, errors.New("illegal value") } return r, nil } @@ -309,7 +310,7 @@ func RuneSliceOperand(x ast.Value, pos int) ([]rune, error) { } var f = make([]rune, a.Len()) - for k := 0; k < a.Len(); k++ { + for k := range a.Len() { b := a.Elem(k) c, ok := b.Value.(ast.String) if !ok { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/cache.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/cache.go similarity index 84% rename from vendor/github.com/open-policy-agent/opa/topdown/cache.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/cache.go index 265457e02f1..42fb6ad3f58 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/cache.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/cache.go @@ -5,8 +5,8 @@ package topdown import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/util" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/util" ) // VirtualCache defines the interface for a cache that stores the results of @@ -32,13 +32,19 @@ type VirtualCache interface { Keys() []ast.Ref } +// BaseCache defines the interface for a cache that stores cached base documents, i.e. data. +type BaseCache interface { + Get(ast.Ref) ast.Value + Put(ast.Ref, ast.Value) +} + type virtualCache struct { stack []*virtualCacheElem } type virtualCacheElem struct { value *ast.Term - children *util.HashMap + children *util.HasherMap[*ast.Term, *virtualCacheElem] undefined bool } @@ -65,12 +71,12 @@ func (c *virtualCache) Pop() { // ast.Term, true is impossible func (c *virtualCache) Get(ref ast.Ref) (*ast.Term, bool) { node := c.stack[len(c.stack)-1] - for i := 0; i < len(ref); i++ { + for i := range ref { x, ok := node.children.Get(ref[i]) if !ok { return nil, false } - node = x.(*virtualCacheElem) + node = x } if node.undefined { return nil, true @@ -83,10 +89,10 @@ func (c *virtualCache) Get(ref ast.Ref) (*ast.Term, bool) { // indicate that the Ref has resolved to undefined. func (c *virtualCache) Put(ref ast.Ref, value *ast.Term) { node := c.stack[len(c.stack)-1] - for i := 0; i < len(ref); i++ { + for i := range ref { x, ok := node.children.Get(ref[i]) if ok { - node = x.(*virtualCacheElem) + node = x } else { next := newVirtualCacheElem() node.children.Put(ref[i], next) @@ -107,13 +113,13 @@ func (c *virtualCache) Keys() []ast.Ref { func keysRecursive(root ast.Ref, node *virtualCacheElem) []ast.Ref { var keys []ast.Ref - node.children.Iter(func(k, v util.T) bool { - ref := root.Append(k.(*ast.Term)) - if v.(*virtualCacheElem).value != nil { + node.children.Iter(func(k *ast.Term, v *virtualCacheElem) bool { + ref := root.Append(k) + if v.value != nil { keys = append(keys, ref) } - if v.(*virtualCacheElem).children.Len() > 0 { - keys = append(keys, keysRecursive(ref, v.(*virtualCacheElem))...) + if v.children.Len() > 0 { + keys = append(keys, keysRecursive(ref, v)...) 
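// A sketch of the generic util.HasherMap used above, assuming (as the *ast.Term
// usage suggests) that keys supply the Hash method *ast.Term already has and
// that any value type is accepted. Unlike the old util.HashMap, which stores
// util.T (interface{}) and forces a type assertion at every call site,
// HasherMap returns typed keys and values.
//
//	import (
//		"github.com/open-policy-agent/opa/v1/ast"
//		"github.com/open-policy-agent/opa/v1/util"
//	)
func hasherMapSketch() {
	m := util.NewHasherMap[*ast.Term, int](ast.TermValueEqual)
	m.Put(ast.StringTerm("a"), 1)
	if v, ok := m.Get(ast.StringTerm("a")); ok {
		_ = v + 1 // v is already an int; no v.(int) assertion needed
	}
	m.Iter(func(_ *ast.Term, _ int) bool {
		return false // returning true stops iteration, as in keysRecursive above
	})
}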
} return false }) @@ -124,12 +130,8 @@ func newVirtualCacheElem() *virtualCacheElem { return &virtualCacheElem{children: newVirtualCacheHashMap()} } -func newVirtualCacheHashMap() *util.HashMap { - return util.NewHashMap(func(a, b util.T) bool { - return a.(*ast.Term).Equal(b.(*ast.Term)) - }, func(x util.T) int { - return x.(*ast.Term).Hash() - }) +func newVirtualCacheHashMap() *util.HasherMap[*ast.Term, *virtualCacheElem] { + return util.NewHasherMap[*ast.Term, *virtualCacheElem](ast.TermValueEqual) } // baseCache implements a trie structure to cache base documents read out of @@ -148,11 +150,17 @@ func newBaseCache() *baseCache { func (c *baseCache) Get(ref ast.Ref) ast.Value { node := c.root - for i := 0; i < len(ref); i++ { + for i := range ref { node = node.children[ref[i].Value] if node == nil { return nil } else if node.value != nil { + if len(ref) == 1 && ast.IsScalar(node.value) { + // If the node is a scalar, return the value directly + // and avoid an allocation when calling Find. + return node.value + } + result, err := node.value.Find(ref[i+1:]) if err != nil { return nil @@ -165,7 +173,7 @@ func (c *baseCache) Get(ref ast.Ref) ast.Value { func (c *baseCache) Put(ref ast.Ref, value ast.Value) { node := c.root - for i := 0; i < len(ref); i++ { + for i := range ref { if child, ok := node.children[ref[i].Value]; ok { node = child } else { @@ -232,7 +240,7 @@ type comprehensionCache struct { type comprehensionCacheElem struct { value *ast.Term - children *util.HashMap + children *util.HasherMap[*ast.Term, *comprehensionCacheElem] } func newComprehensionCache() *comprehensionCache { @@ -264,22 +272,22 @@ func newComprehensionCacheElem() *comprehensionCacheElem { func (c *comprehensionCacheElem) Get(key []*ast.Term) *ast.Term { node := c - for i := 0; i < len(key); i++ { + for i := range key { x, ok := node.children.Get(key[i]) if !ok { return nil } - node = x.(*comprehensionCacheElem) + node = x } return node.value } func (c *comprehensionCacheElem) Put(key []*ast.Term, value *ast.Term) { node := c - for i := 0; i < len(key); i++ { + for i := range key { x, ok := node.children.Get(key[i]) if ok { - node = x.(*comprehensionCacheElem) + node = x } else { next := newComprehensionCacheElem() node.children.Put(key[i], next) @@ -289,12 +297,8 @@ func (c *comprehensionCacheElem) Put(key []*ast.Term, value *ast.Term) { node.value = value } -func newComprehensionCacheHashMap() *util.HashMap { - return util.NewHashMap(func(a, b util.T) bool { - return a.(*ast.Term).Equal(b.(*ast.Term)) - }, func(x util.T) int { - return x.(*ast.Term).Hash() - }) +func newComprehensionCacheHashMap() *util.HasherMap[*ast.Term, *comprehensionCacheElem] { + return util.NewHasherMap[*ast.Term, *comprehensionCacheElem](ast.TermValueEqual) } type functionMocksStack struct { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/cache/cache.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/cache/cache.go similarity index 63% rename from vendor/github.com/open-policy-agent/opa/topdown/cache/cache.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/cache/cache.go index 55ed340619e..60f38aaba2f 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/cache/cache.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/cache/cache.go @@ -13,8 +13,8 @@ import ( "sync" "time" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/util" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/util" ) const ( @@ -24,16 +24,36 @@ const ( 
defaultStaleEntryEvictionPeriodSeconds = int64(0) // never ) +var interQueryBuiltinValueCacheDefaultConfigs = map[string]*NamedValueCacheConfig{} + +func getDefaultInterQueryBuiltinValueCacheConfig(name string) *NamedValueCacheConfig { + return interQueryBuiltinValueCacheDefaultConfigs[name] +} + +// RegisterDefaultInterQueryBuiltinValueCacheConfig registers a default configuration for the inter-query value cache; +// used when none has been explicitly configured. +// To disable a named cache when not configured, pass a nil config. +func RegisterDefaultInterQueryBuiltinValueCacheConfig(name string, config *NamedValueCacheConfig) { + interQueryBuiltinValueCacheDefaultConfigs[name] = config +} + // Config represents the configuration for the inter-query builtin cache. type Config struct { InterQueryBuiltinCache InterQueryBuiltinCacheConfig `json:"inter_query_builtin_cache"` InterQueryBuiltinValueCache InterQueryBuiltinValueCacheConfig `json:"inter_query_builtin_value_cache"` } +// NamedValueCacheConfig represents the configuration of a named cache that built-in functions can utilize. +// A default configuration to be used if not explicitly configured can be registered using RegisterDefaultInterQueryBuiltinValueCacheConfig. +type NamedValueCacheConfig struct { + MaxNumEntries *int `json:"max_num_entries,omitempty"` +} + // InterQueryBuiltinValueCacheConfig represents the configuration of the inter-query value cache that built-in functions can utilize. // MaxNumEntries - max number of cache entries type InterQueryBuiltinValueCacheConfig struct { - MaxNumEntries *int `json:"max_num_entries,omitempty"` + MaxNumEntries *int `json:"max_num_entries,omitempty"` + NamedCacheConfigs map[string]*NamedValueCacheConfig `json:"named,omitempty"` } // InterQueryBuiltinCacheConfig represents the configuration of the inter-query cache that built-in functions can utilize. 
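// A hedged sketch of the on-the-wire shape implied by the json tags above: the
// global value-cache bound is unchanged, and per-cache bounds live under
// "named". The cache name "io_jwt" is illustrative only; real names are
// whatever built-ins request via GetCache, with fallbacks registered through
// RegisterDefaultInterQueryBuiltinValueCacheConfig.
//
//	import "github.com/open-policy-agent/opa/v1/topdown/cache"
func namedCacheConfigSketch() (*cache.Config, error) {
	raw := []byte(`{
		"inter_query_builtin_value_cache": {
			"max_num_entries": 1000,
			"named": {
				"io_jwt": {"max_num_entries": 100}
			}
		}
	}`)
	return cache.ParseCachingConfig(raw)
}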
@@ -59,8 +79,16 @@ func ParseCachingConfig(raw []byte) (*Config, error) { maxInterQueryBuiltinValueCacheSize := new(int) *maxInterQueryBuiltinValueCacheSize = defaultInterQueryBuiltinValueCacheSize - return &Config{InterQueryBuiltinCache: InterQueryBuiltinCacheConfig{MaxSizeBytes: maxSize, ForcedEvictionThresholdPercentage: threshold, StaleEntryEvictionPeriodSeconds: period}, - InterQueryBuiltinValueCache: InterQueryBuiltinValueCacheConfig{MaxNumEntries: maxInterQueryBuiltinValueCacheSize}}, nil + return &Config{ + InterQueryBuiltinCache: InterQueryBuiltinCacheConfig{ + MaxSizeBytes: maxSize, + ForcedEvictionThresholdPercentage: threshold, + StaleEntryEvictionPeriodSeconds: period, + }, + InterQueryBuiltinValueCache: InterQueryBuiltinValueCacheConfig{ + MaxNumEntries: maxInterQueryBuiltinValueCacheSize, + }, + }, nil } var config Config @@ -114,6 +142,13 @@ func (c *Config) validateAndInjectDefaults() error { } } + for name, namedConfig := range c.InterQueryBuiltinValueCache.NamedCacheConfigs { + numEntries := *namedConfig.MaxNumEntries + if numEntries < 0 { + return fmt.Errorf("invalid max_num_entries %v for named cache %v", numEntries, name) + } + } + return nil } @@ -154,11 +189,14 @@ func NewInterQueryCache(config *Config) InterQueryCache { func NewInterQueryCacheWithContext(ctx context.Context, config *Config) InterQueryCache { iqCache := newCache(config) if iqCache.staleEntryEvictionTimePeriodSeconds() > 0 { - cleanupTicker := time.NewTicker(time.Duration(iqCache.staleEntryEvictionTimePeriodSeconds()) * time.Second) go func() { + cleanupTicker := time.NewTicker(time.Duration(iqCache.staleEntryEvictionTimePeriodSeconds()) * time.Second) for { select { case <-cleanupTicker.C: + // NOTE: We stop the ticker and create a new one here to ensure that applications + // get _at least_ staleEntryEvictionTimePeriodSeconds with the cache unlocked; + // see https://github.com/open-policy-agent/opa/pull/7188/files#r1855342998 cleanupTicker.Stop() iqCache.cleanStaleValues() cleanupTicker = time.NewTicker(time.Duration(iqCache.staleEntryEvictionTimePeriodSeconds()) * time.Second) @@ -287,7 +325,7 @@ func (c *cache) unsafeDelete(k ast.Value) { c.l.Remove(cacheItem.keyElement) } -func (c *cache) unsafeClone(value InterQueryCacheValue) (InterQueryCacheValue, error) { +func (*cache) unsafeClone(value InterQueryCacheValue) (InterQueryCacheValue, error) { return value.Clone() } @@ -327,62 +365,59 @@ func (c *cache) cleanStaleValues() (dropped int) { return dropped } -type InterQueryValueCache interface { +type InterQueryValueCacheBucket interface { Get(key ast.Value) (value any, found bool) Insert(key ast.Value, value any) int Delete(key ast.Value) - UpdateConfig(config *Config) } -type interQueryValueCache struct { - items map[string]any - config *Config +type interQueryValueCacheBucket struct { + items util.HasherMap[ast.Value, any] + config *NamedValueCacheConfig mtx sync.RWMutex } -// Get returns the value in the cache for k. -func (c *interQueryValueCache) Get(k ast.Value) (any, bool) { +func newItemsMap() *util.HasherMap[ast.Value, any] { + return util.NewHasherMap[ast.Value, any](ast.ValueEqual) +} + +func (c *interQueryValueCacheBucket) Get(k ast.Value) (any, bool) { c.mtx.RLock() defer c.mtx.RUnlock() - value, ok := c.items[k.String()] - return value, ok + return c.items.Get(k) } -// Insert inserts a key k into the cache with value v. 
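// A standalone sketch of the ticker pattern adopted in
// NewInterQueryCacheWithContext above: stop the ticker before running the
// (lock-holding, potentially slow) cleanup and start a fresh one only
// afterwards, so callers get at least one full period with the cache unlocked
// between cleanups instead of back-to-back ticks.
//
//	import "time"
func cleanupLoop(period time.Duration, clean func(), done <-chan struct{}) {
	t := time.NewTicker(period)
	for {
		select {
		case <-t.C:
			t.Stop()                   // no ticks pile up while cleaning
			clean()
			t = time.NewTicker(period) // the clock restarts after cleanup ends
		case <-done:
			t.Stop()
			return
		}
	}
}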
-func (c *interQueryValueCache) Insert(k ast.Value, v any) (dropped int) { +func (c *interQueryValueCacheBucket) Insert(k ast.Value, v any) (dropped int) { c.mtx.Lock() defer c.mtx.Unlock() maxEntries := c.maxNumEntries() if maxEntries > 0 { - if len(c.items) >= maxEntries { - itemsToRemove := len(c.items) - maxEntries + 1 + l := c.items.Len() + if l >= maxEntries { + itemsToRemove := l - maxEntries + 1 // Delete a (semi-)random key to make room for the new one. - for k := range c.items { - delete(c.items, k) + c.items.Iter(func(k ast.Value, _ any) bool { + c.items.Delete(k) dropped++ - if itemsToRemove == dropped { - break - } - } + return itemsToRemove == dropped + }) } } - c.items[k.String()] = v + c.items.Put(k, v) return dropped } -// Delete deletes the value in the cache for k. -func (c *interQueryValueCache) Delete(k ast.Value) { +func (c *interQueryValueCacheBucket) Delete(k ast.Value) { c.mtx.Lock() defer c.mtx.Unlock() - delete(c.items, k.String()) + c.items.Delete(k) } -// UpdateConfig updates the cache config. -func (c *interQueryValueCache) UpdateConfig(config *Config) { +func (c *interQueryValueCacheBucket) updateConfig(config *NamedValueCacheConfig) { if config == nil { return } @@ -391,16 +426,149 @@ func (c *interQueryValueCache) UpdateConfig(config *Config) { c.config = config } -func (c *interQueryValueCache) maxNumEntries() int { +func (c *interQueryValueCacheBucket) maxNumEntries() int { if c.config == nil { return defaultInterQueryBuiltinValueCacheSize } - return *c.config.InterQueryBuiltinValueCache.MaxNumEntries + return *c.config.MaxNumEntries +} + +type InterQueryValueCache interface { + InterQueryValueCacheBucket + GetCache(name string) InterQueryValueCacheBucket + UpdateConfig(config *Config) } func NewInterQueryValueCache(_ context.Context, config *Config) InterQueryValueCache { - return &interQueryValueCache{ - items: map[string]any{}, - config: config, + var c *InterQueryBuiltinValueCacheConfig + var nc *NamedValueCacheConfig + if config != nil { + c = &config.InterQueryBuiltinValueCache + // NOTE: This is a side-effect of reusing the interQueryValueCacheBucket as the global cache. + // It's a hidden implementation detail that we can clean up in the future when revisiting the named caches + // to automatically apply them to any built-in instead of the global cache. 
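// A self-contained sketch of the eviction strategy in Insert above: when the
// bucket is at capacity, walk the entries in unspecified iteration order and
// drop them until the new one fits. That unspecified order is what the
// "(semi-)random key" comment refers to; it is a deliberately cheap stand-in
// for LRU bookkeeping.
func insertEvicting(items map[string]any, maxEntries int, k string, v any) (dropped int) {
	if maxEntries > 0 && len(items) >= maxEntries {
		toRemove := len(items) - maxEntries + 1
		for victim := range items { // effectively random victim selection
			delete(items, victim)
			dropped++
			if dropped == toRemove {
				break
			}
		}
	}
	items[k] = v
	return dropped
}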
+ nc = &NamedValueCacheConfig{ + MaxNumEntries: c.MaxNumEntries, + } + } + + return &interQueryBuiltinValueCache{ + globalCache: interQueryValueCacheBucket{ + items: *newItemsMap(), + config: nc, + }, + namedCaches: map[string]*interQueryValueCacheBucket{}, + config: c, + } +} + +type interQueryBuiltinValueCache struct { + globalCache interQueryValueCacheBucket + namedCachesLock sync.RWMutex + namedCaches map[string]*interQueryValueCacheBucket + config *InterQueryBuiltinValueCacheConfig +} + +func (c *interQueryBuiltinValueCache) Get(k ast.Value) (any, bool) { + if c == nil { + return nil, false + } + + return c.globalCache.Get(k) +} + +func (c *interQueryBuiltinValueCache) Insert(k ast.Value, v any) int { + if c == nil { + return 0 + } + + return c.globalCache.Insert(k, v) +} + +func (c *interQueryBuiltinValueCache) Delete(k ast.Value) { + if c == nil { + return + } + + c.globalCache.Delete(k) +} + +func (c *interQueryBuiltinValueCache) GetCache(name string) InterQueryValueCacheBucket { + if c == nil { + return nil + } + + if c.namedCaches == nil { + return nil + } + + c.namedCachesLock.RLock() + nc, ok := c.namedCaches[name] + c.namedCachesLock.RUnlock() + + if !ok { + c.namedCachesLock.Lock() + defer c.namedCachesLock.Unlock() + + if nc, ok := c.namedCaches[name]; ok { + // Some other goroutine has created the cache while we were waiting for the lock. + return nc + } + + var config *NamedValueCacheConfig + if c.config != nil { + config = c.config.NamedCacheConfigs[name] + if config == nil { + config = getDefaultInterQueryBuiltinValueCacheConfig(name) + } + } + + if config == nil { + // No config, cache disabled. + return nil + } + + nc = &interQueryValueCacheBucket{ + items: *newItemsMap(), + config: config, + } + + c.namedCaches[name] = nc + } + + return nc +} + +func (c *interQueryBuiltinValueCache) UpdateConfig(config *Config) { + if c == nil { + return + } + + if config == nil { + c.globalCache.updateConfig(nil) + } else { + + c.globalCache.updateConfig(&NamedValueCacheConfig{ + MaxNumEntries: config.InterQueryBuiltinValueCache.MaxNumEntries, + }) + } + + c.namedCachesLock.Lock() + defer c.namedCachesLock.Unlock() + + c.config = &config.InterQueryBuiltinValueCache + + for name, nc := range c.namedCaches { + // For each named cache: if it has a config, update it; if no config, remove it. 
+ namedConfig := c.config.NamedCacheConfigs[name] + if namedConfig == nil { + namedConfig = getDefaultInterQueryBuiltinValueCacheConfig(name) + } + + if namedConfig == nil { + delete(c.namedCaches, name) + } else { + nc.updateConfig(namedConfig) + } } } diff --git a/vendor/github.com/open-policy-agent/opa/topdown/cancel.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/cancel.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/topdown/cancel.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/cancel.go diff --git a/vendor/github.com/open-policy-agent/opa/topdown/casts.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/casts.go similarity index 76% rename from vendor/github.com/open-policy-agent/opa/topdown/casts.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/casts.go index 2eb8f97fc98..f3953248412 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/casts.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/casts.go @@ -6,24 +6,38 @@ package topdown import ( "strconv" + "strings" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) func builtinToNumber(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { switch a := operands[0].Value.(type) { case ast.Null: - return iter(ast.NumberTerm("0")) + return iter(ast.InternedIntNumberTerm(0)) case ast.Boolean: if a { - return iter(ast.NumberTerm("1")) + return iter(ast.InternedIntNumberTerm(1)) } - return iter(ast.NumberTerm("0")) + return iter(ast.InternedIntNumberTerm(0)) case ast.Number: - return iter(ast.NewTerm(a)) + return iter(operands[0]) case ast.String: - _, err := strconv.ParseFloat(string(a), 64) + strValue := string(a) + + if it := ast.InternedIntNumberTermFromString(strValue); it != nil { + return iter(it) + } + + trimmedVal := strings.TrimLeft(strValue, "+-") + lowerCaseVal := strings.ToLower(trimmedVal) + + if lowerCaseVal == "inf" || lowerCaseVal == "infinity" || lowerCaseVal == "nan" { + return builtins.NewOperandTypeErr(1, operands[0].Value, "valid number string") + } + + _, err := strconv.ParseFloat(strValue, 64) if err != nil { return err } @@ -32,7 +46,7 @@ func builtinToNumber(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term return builtins.NewOperandTypeErr(1, operands[0].Value, "null", "boolean", "number", "string") } -// Deprecated in v0.13.0. +// Deprecated: deprecated in v0.13.0. func builtinToArray(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { switch val := operands[0].Value.(type) { case *ast.Array: @@ -50,7 +64,7 @@ func builtinToArray(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) } } -// Deprecated in v0.13.0. +// Deprecated: deprecated in v0.13.0. func builtinToSet(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { switch val := operands[0].Value.(type) { case *ast.Array: @@ -66,7 +80,7 @@ func builtinToSet(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) e } } -// Deprecated in v0.13.0. +// Deprecated: deprecated in v0.13.0. func builtinToString(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { switch val := operands[0].Value.(type) { case ast.String: @@ -76,7 +90,7 @@ func builtinToString(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term } } -// Deprecated in v0.13.0. +// Deprecated: deprecated in v0.13.0. 
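// A condensed sketch of the double-checked locking in GetCache above: a cheap
// read-locked lookup first; only on a miss take the write lock, re-check
// (another goroutine may have created the bucket while we waited), then create
// and publish. The types are illustrative stand-ins, not OPA API.
//
//	import "sync"
type bucket struct{ entries int }

type bucketRegistry struct {
	mu      sync.RWMutex
	buckets map[string]*bucket
}

func (r *bucketRegistry) get(name string) *bucket {
	r.mu.RLock()
	b, ok := r.buckets[name]
	r.mu.RUnlock()
	if ok {
		return b
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	if b, ok := r.buckets[name]; ok {
		return b // lost the race; reuse the winner's bucket
	}
	b = &bucket{}
	r.buckets[name] = b
	return b
}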
func builtinToBoolean(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { switch val := operands[0].Value.(type) { case ast.Boolean: @@ -86,7 +100,7 @@ func builtinToBoolean(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Ter } } -// Deprecated in v0.13.0. +// Deprecated: deprecated in v0.13.0. func builtinToNull(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { switch val := operands[0].Value.(type) { case ast.Null: @@ -96,7 +110,7 @@ func builtinToNull(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) } } -// Deprecated in v0.13.0. +// Deprecated: deprecated in v0.13.0. func builtinToObject(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { switch val := operands[0].Value.(type) { case ast.Object: diff --git a/vendor/github.com/open-policy-agent/opa/topdown/cidr.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/cidr.go similarity index 94% rename from vendor/github.com/open-policy-agent/opa/topdown/cidr.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/cidr.go index 5b011bd1613..00c034656b8 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/cidr.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/cidr.go @@ -8,9 +8,9 @@ import ( "net" "sort" - "github.com/open-policy-agent/opa/ast" cidrMerge "github.com/open-policy-agent/opa/internal/cidr/merge" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) func getNetFromOperand(v ast.Value) (*net.IPNet, error) { @@ -31,7 +31,7 @@ func getLastIP(cidr *net.IPNet) (net.IP, error) { prefixLen, bits := cidr.Mask.Size() if prefixLen == 0 && bits == 0 { // non-standard mask, see https://golang.org/pkg/net/#IPMask.Size - return nil, fmt.Errorf("CIDR mask is in non-standard format") + return nil, errors.New("CIDR mask is in non-standard format") } var lastIP []byte if prefixLen == bits { @@ -75,7 +75,7 @@ func builtinNetCIDRIntersects(_ BuiltinContext, operands []*ast.Term, iter func( // If either net contains the others starting IP they are overlapping cidrsOverlap := cidrnetA.Contains(cidrnetB.IP) || cidrnetB.Contains(cidrnetA.IP) - return iter(ast.BooleanTerm(cidrsOverlap)) + return iter(ast.InternedBooleanTerm(cidrsOverlap)) } func builtinNetCIDRContains(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -92,7 +92,7 @@ func builtinNetCIDRContains(_ BuiltinContext, operands []*ast.Term, iter func(*a ip := net.ParseIP(string(bStr)) if ip != nil { - return iter(ast.BooleanTerm(cidrnetA.Contains(ip))) + return iter(ast.InternedBooleanTerm(cidrnetA.Contains(ip))) } // It wasn't an IP, try and parse it as a CIDR @@ -113,7 +113,7 @@ func builtinNetCIDRContains(_ BuiltinContext, operands []*ast.Term, iter func(*a cidrContained = cidrnetA.Contains(lastIP) } - return iter(ast.BooleanTerm(cidrContained)) + return iter(ast.InternedBooleanTerm(cidrContained)) } var errNetCIDRContainsMatchElementType = errors.New("element must be string or non-empty array") @@ -137,12 +137,12 @@ func evalNetCIDRContainsMatchesOperand(operand int, a *ast.Term, iter func(cidr, case ast.String: return iter(a, a) case *ast.Array: - for i := 0; i < v.Len(); i++ { + for i := range v.Len() { cidr, err := getCIDRMatchTerm(v.Elem(i)) if err != nil { return fmt.Errorf("operand %v: %v", operand, err) } - if err := iter(cidr, ast.IntNumberTerm(i)); err != nil { + if err := iter(cidr, ast.InternedIntNumberTerm(i)); err != nil { return 
err } } @@ -219,13 +219,13 @@ func builtinNetCIDRExpand(bctx BuiltinContext, operands []*ast.Term, iter func(* func builtinNetCIDRIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { cidr, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } if _, _, err := net.ParseCIDR(string(cidr)); err != nil { - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } - return iter(ast.BooleanTerm(true)) + return iter(ast.InternedBooleanTerm(true)) } type cidrBlockRange struct { @@ -255,7 +255,7 @@ func (c cidrBlockRanges) Less(i, j int) bool { } // Then compare first IP. - cmp = bytes.Compare(*c[i].First, *c[i].First) + cmp = bytes.Compare(*c[i].First, *c[j].First) if cmp < 0 { return true } else if cmp > 0 { @@ -274,7 +274,7 @@ func builtinNetCIDRMerge(_ BuiltinContext, operands []*ast.Term, iter func(*ast. switch v := operands[0].Value.(type) { case *ast.Array: - for i := 0; i < v.Len(); i++ { + for i := range v.Len() { network, err := generateIPNet(v.Elem(i)) if err != nil { return err diff --git a/vendor/github.com/open-policy-agent/opa/topdown/comparison.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/comparison.go similarity index 90% rename from vendor/github.com/open-policy-agent/opa/topdown/comparison.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/comparison.go index 0d033d2c32a..9e1585a28ab 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/comparison.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/comparison.go @@ -4,7 +4,7 @@ package topdown -import "github.com/open-policy-agent/opa/ast" +import "github.com/open-policy-agent/opa/v1/ast" type compareFunc func(a, b ast.Value) bool @@ -34,7 +34,7 @@ func compareEq(a, b ast.Value) bool { func builtinCompare(cmp compareFunc) BuiltinFunc { return func(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - return iter(ast.BooleanTerm(cmp(operands[0].Value, operands[1].Value))) + return iter(ast.InternedBooleanTerm(cmp(operands[0].Value, operands[1].Value))) } } diff --git a/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/copypropagation.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/copypropagation.go similarity index 97% rename from vendor/github.com/open-policy-agent/opa/topdown/copypropagation/copypropagation.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/copypropagation.go index 8824d19bd21..9f4beca54a9 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/copypropagation.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/copypropagation.go @@ -8,7 +8,7 @@ import ( "fmt" "sort" - "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/v1/ast" ) // CopyPropagator implements a simple copy propagation optimization to remove @@ -209,7 +209,7 @@ func (p *CopyPropagator) Apply(query ast.Body) ast.Body { // plugBindings applies the binding list and union-find to x. This process // removes as many variables as possible. 
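// The one-character fix in cidrBlockRanges.Less above deserves a note: the old
// code compared c[i].First with itself, so bytes.Compare always returned 0 and
// the first-IP tie-break was dead code; whenever the preceding comparison
// tied, the pair was left in arbitrary order. A reduced demonstration:
package main

import (
	"bytes"
	"fmt"
)

func main() {
	a := []byte{10, 0, 0, 0}
	b := []byte{192, 168, 0, 0}
	fmt.Println(bytes.Compare(a, a)) // 0: the pre-fix comparison, for any input
	fmt.Println(bytes.Compare(a, b)) // -1: the fix actually orders by first IP
}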
-func (p *CopyPropagator) plugBindings(pctx *plugContext, expr *ast.Expr) *ast.Expr { +func (*CopyPropagator) plugBindings(pctx *plugContext, expr *ast.Expr) *ast.Expr { xform := bindingPlugTransform{ pctx: pctx, @@ -244,7 +244,7 @@ func (t bindingPlugTransform) Transform(x interface{}) (interface{}, error) { } } -func (t bindingPlugTransform) plugBindingsVar(pctx *plugContext, v ast.Var) ast.Value { +func (bindingPlugTransform) plugBindingsVar(pctx *plugContext, v ast.Var) ast.Value { var result ast.Value = v @@ -274,7 +274,7 @@ func (t bindingPlugTransform) plugBindingsVar(pctx *plugContext, v ast.Var) ast. return b } -func (t bindingPlugTransform) plugBindingsRef(pctx *plugContext, v ast.Ref) ast.Ref { +func (bindingPlugTransform) plugBindingsRef(pctx *plugContext, v ast.Ref) ast.Ref { // Apply union-find to remove redundant variables from input. if root, ok := pctx.uf.Find(v[0].Value); ok { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/unionfind.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/unionfind.go similarity index 83% rename from vendor/github.com/open-policy-agent/opa/topdown/copypropagation/unionfind.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/unionfind.go index 38ec56f315c..528c83a0f4b 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/unionfind.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/unionfind.go @@ -7,25 +7,21 @@ package copypropagation import ( "fmt" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/util" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/util" ) type rankFunc func(*unionFindRoot, *unionFindRoot) (*unionFindRoot, *unionFindRoot) type unionFind struct { - roots *util.HashMap + roots *util.HasherMap[ast.Value, *unionFindRoot] parents *ast.ValueMap rank rankFunc } func newUnionFind(rank rankFunc) *unionFind { return &unionFind{ - roots: util.NewHashMap(func(a util.T, b util.T) bool { - return a.(ast.Value).Compare(b.(ast.Value)) == 0 - }, func(v util.T) int { - return v.(ast.Value).Hash() - }), + roots: util.NewHasherMap[ast.Value, *unionFindRoot](ast.ValueEqual), parents: ast.NewValueMap(), rank: rank, } @@ -53,7 +49,7 @@ func (uf *unionFind) Find(v ast.Value) (*unionFindRoot, bool) { if parent.Compare(v) == 0 { r, ok := uf.roots.Get(v) - return r.(*unionFindRoot), ok + return r, ok } return uf.Find(parent) @@ -93,13 +89,13 @@ func (uf *unionFind) String() string { map[string]ast.Value{}, } - uf.roots.Iter(func(k util.T, v util.T) bool { - o.Roots[k.(ast.Value).String()] = struct { + uf.roots.Iter(func(k ast.Value, v *unionFindRoot) bool { + o.Roots[k.String()] = struct { Constant *ast.Term Key ast.Value }{ - v.(*unionFindRoot).constant, - v.(*unionFindRoot).key, + v.constant, + v.key, } return true }) diff --git a/vendor/github.com/open-policy-agent/opa/topdown/crypto.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/crypto.go similarity index 89% rename from vendor/github.com/open-policy-agent/opa/topdown/crypto.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/crypto.go index f24432a264a..dafbac78507 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/crypto.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/crypto.go @@ -15,8 +15,10 @@ import ( "crypto/tls" "crypto/x509" "encoding/base64" + "encoding/hex" "encoding/json" "encoding/pem" + "errors" "fmt" "hash" "os" @@ -25,9 +27,9 @@ import ( 
"github.com/open-policy-agent/opa/internal/jwx/jwk" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" - "github.com/open-policy-agent/opa/util" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/util" ) const ( @@ -96,7 +98,7 @@ func builtinCryptoX509ParseAndVerifyCertificates(_ BuiltinContext, operands []*a } invalid := ast.ArrayTerm( - ast.BooleanTerm(false), + ast.InternedBooleanTerm(false), ast.NewTerm(ast.NewArray()), ) @@ -116,7 +118,7 @@ func builtinCryptoX509ParseAndVerifyCertificates(_ BuiltinContext, operands []*a } valid := ast.ArrayTerm( - ast.BooleanTerm(true), + ast.InternedBooleanTerm(true), ast.NewTerm(value), ) @@ -152,14 +154,12 @@ func builtinCryptoX509ParseAndVerifyCertificatesWithOptions(_ BuiltinContext, op return err } - invalid := ast.ArrayTerm( - ast.BooleanTerm(false), - ast.NewTerm(ast.NewArray()), - ) - certs, err := getX509CertsFromString(string(input)) if err != nil { - return iter(invalid) + return iter(ast.ArrayTerm( + ast.InternedBooleanTerm(false), + ast.NewTerm(ast.NewArray()), + )) } // Collect the cert verification options @@ -170,7 +170,10 @@ func builtinCryptoX509ParseAndVerifyCertificatesWithOptions(_ BuiltinContext, op verified, err := verifyX509CertificateChain(certs, verifyOpt) if err != nil { - return iter(invalid) + return iter(ast.ArrayTerm( + ast.InternedBooleanTerm(false), + ast.NewTerm(ast.NewArray()), + )) } value, err := ast.InterfaceToValue(verified) @@ -178,12 +181,10 @@ func builtinCryptoX509ParseAndVerifyCertificatesWithOptions(_ BuiltinContext, op return err } - valid := ast.ArrayTerm( - ast.BooleanTerm(true), + return iter(ast.ArrayTerm( + ast.InternedBooleanTerm(true), ast.NewTerm(value), - ) - - return iter(valid) + )) } func extractVerifyOpts(options ast.Object) (verifyOpt x509.VerifyOptions, err error) { @@ -204,7 +205,7 @@ func extractVerifyOpts(options ast.Object) (verifyOpt x509.VerifyOptions, err er if ok { verifyOpt.DNSName = strings.Trim(string(dns), "\"") } else { - return verifyOpt, fmt.Errorf("'DNSName' should be a string") + return verifyOpt, errors.New("'DNSName' should be a string") } case "CurrentTime": c, ok := options.Get(key).Value.(ast.Number) @@ -213,10 +214,10 @@ func extractVerifyOpts(options ast.Object) (verifyOpt x509.VerifyOptions, err er if ok { verifyOpt.CurrentTime = time.Unix(0, nanosecs) } else { - return verifyOpt, fmt.Errorf("'CurrentTime' should be a valid int64 number") + return verifyOpt, errors.New("'CurrentTime' should be a valid int64 number") } } else { - return verifyOpt, fmt.Errorf("'CurrentTime' should be a number") + return verifyOpt, errors.New("'CurrentTime' should be a number") } case "MaxConstraintComparisons": c, ok := options.Get(key).Value.(ast.Number) @@ -225,23 +226,23 @@ func extractVerifyOpts(options ast.Object) (verifyOpt x509.VerifyOptions, err er if ok { verifyOpt.MaxConstraintComparisions = maxComparisons } else { - return verifyOpt, fmt.Errorf("'MaxConstraintComparisons' should be a valid number") + return verifyOpt, errors.New("'MaxConstraintComparisons' should be a valid number") } } else { - return verifyOpt, fmt.Errorf("'MaxConstraintComparisons' should be a number") + return verifyOpt, errors.New("'MaxConstraintComparisons' should be a number") } case "KeyUsages": type forEach interface { Foreach(func(*ast.Term)) } var ks forEach - switch options.Get(key).Value.(type) { + switch v := options.Get(key).Value.(type) { case *ast.Array: - ks 
= options.Get(key).Value.(*ast.Array) + ks = v case ast.Set: - ks = options.Get(key).Value.(ast.Set) + ks = v default: - return verifyOpt, fmt.Errorf("'KeyUsages' should be an Array or Set") + return verifyOpt, errors.New("'KeyUsages' should be an Array or Set") } // Collect the x509.ExtKeyUsage values by looking up the @@ -262,7 +263,7 @@ func extractVerifyOpts(options ast.Object) (verifyOpt x509.VerifyOptions, err er return x509.VerifyOptions{}, fmt.Errorf("invalid entries for 'KeyUsages' found: %s", invalidKUsgs) } default: - return verifyOpt, fmt.Errorf("invalid key option") + return verifyOpt, errors.New("invalid key option") } } @@ -312,7 +313,7 @@ func builtinCryptoX509ParseCertificateRequest(_ BuiltinContext, operands []*ast. p, _ := pem.Decode(bytes) if p != nil && p.Type != blockTypeCertificateRequest { - return fmt.Errorf("invalid PEM-encoded certificate signing request") + return errors.New("invalid PEM-encoded certificate signing request") } if p != nil { bytes = p.Bytes @@ -354,7 +355,7 @@ func builtinCryptoJWKFromPrivateKey(_ BuiltinContext, operands []*ast.Term, iter pemDataString := string(input) if pemDataString == "" { - return fmt.Errorf("input PEM data was empty") + return errors.New("input PEM data was empty") } // This built in must be supplied a valid PEM or base64 encoded string. @@ -374,7 +375,7 @@ func builtinCryptoJWKFromPrivateKey(_ BuiltinContext, operands []*ast.Term, iter } if len(rawKeys) == 0 { - return iter(ast.NullTerm()) + return iter(ast.InternedNullTerm) } key, err := jwk.New(rawKeys[0]) @@ -408,7 +409,7 @@ func builtinCryptoParsePrivateKeys(_ BuiltinContext, operands []*ast.Term, iter } if string(input) == "" { - return iter(ast.NullTerm()) + return iter(ast.InternedNullTerm) } // get the raw private key @@ -418,7 +419,7 @@ func builtinCryptoParsePrivateKeys(_ BuiltinContext, operands []*ast.Term, iter } if len(rawKeys) == 0 { - return iter(ast.NewTerm(ast.NewArray())) + return iter(emptyArr) } bs, err := json.Marshal(rawKeys) @@ -439,36 +440,43 @@ func builtinCryptoParsePrivateKeys(_ BuiltinContext, operands []*ast.Term, iter return iter(ast.NewTerm(value)) } -func hashHelper(a ast.Value, h func(ast.String) string) (ast.Value, error) { - s, err := builtins.StringOperand(a, 1) - if err != nil { - return nil, err - } - return ast.String(h(s)), nil +func toHexEncodedString(src []byte) string { + dst := make([]byte, hex.EncodedLen(len(src))) + hex.Encode(dst, src) + return util.ByteSliceToString(dst) } func builtinCryptoMd5(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - res, err := hashHelper(operands[0].Value, func(s ast.String) string { return fmt.Sprintf("%x", md5.Sum([]byte(s))) }) + s, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { return err } - return iter(ast.NewTerm(res)) + + md5sum := md5.Sum([]byte(s)) + + return iter(ast.StringTerm(toHexEncodedString(md5sum[:]))) } func builtinCryptoSha1(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - res, err := hashHelper(operands[0].Value, func(s ast.String) string { return fmt.Sprintf("%x", sha1.Sum([]byte(s))) }) + s, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { return err } - return iter(ast.NewTerm(res)) + + sha1sum := sha1.Sum([]byte(s)) + + return iter(ast.StringTerm(toHexEncodedString(sha1sum[:]))) } func builtinCryptoSha256(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - res, err := hashHelper(operands[0].Value, func(s ast.String) string { return fmt.Sprintf("%x", 
sha256.Sum256([]byte(s))) }) + s, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { return err } - return iter(ast.NewTerm(res)) + + sha256sum := sha256.Sum256([]byte(s)) + + return iter(ast.StringTerm(toHexEncodedString(sha256sum[:]))) } func hmacHelper(operands []*ast.Term, iter func(*ast.Term) error, h func() hash.Hash) error { @@ -488,7 +496,7 @@ func hmacHelper(operands []*ast.Term, iter func(*ast.Term) error, h func() hash. mac.Write([]byte(message)) messageDigest := mac.Sum(nil) - return iter(ast.StringTerm(fmt.Sprintf("%x", messageDigest))) + return iter(ast.StringTerm(hex.EncodeToString(messageDigest))) } func builtinCryptoHmacMd5(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -522,7 +530,7 @@ func builtinCryptoHmacEqual(_ BuiltinContext, operands []*ast.Term, iter func(*a res := hmac.Equal([]byte(mac1), []byte(mac2)) - return iter(ast.BooleanTerm(res)) + return iter(ast.InternedBooleanTerm(res)) } func init() { @@ -697,7 +705,7 @@ func addCACertsFromBytes(pool *x509.CertPool, pemBytes []byte) (*x509.CertPool, } if ok := pool.AppendCertsFromPEM(pemBytes); !ok { - return nil, fmt.Errorf("could not append certificates") + return nil, errors.New("could not append certificates") } return pool, nil @@ -725,9 +733,11 @@ func readCertFromFile(localCertFile string) ([]byte, error) { return certPEM, nil } +var beginPrefix = []byte("-----BEGIN ") + func getTLSx509KeyPairFromString(certPemBlock []byte, keyPemBlock []byte) (*tls.Certificate, error) { - if !strings.HasPrefix(string(certPemBlock), "-----BEGIN") { + if !bytes.HasPrefix(certPemBlock, beginPrefix) { s, err := base64.StdEncoding.DecodeString(string(certPemBlock)) if err != nil { return nil, err @@ -735,7 +745,7 @@ func getTLSx509KeyPairFromString(certPemBlock []byte, keyPemBlock []byte) (*tls. certPemBlock = s } - if !strings.HasPrefix(string(keyPemBlock), "-----BEGIN") { + if !bytes.HasPrefix(keyPemBlock, beginPrefix) { s, err := base64.StdEncoding.DecodeString(string(keyPemBlock)) if err != nil { return nil, err @@ -744,7 +754,7 @@ func getTLSx509KeyPairFromString(certPemBlock []byte, keyPemBlock []byte) (*tls. } // we assume it a DER certificate and try to convert it to a PEM. 
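// A standalone sketch of the digest-formatting change above:
// fmt.Sprintf("%x", sum) and encoding/hex produce identical lowercase hex, but
// the fmt path parses a format string and boxes its argument, while hex writes
// straight into a right-sized buffer; toHexEncodedString above additionally
// reuses that buffer via util.ByteSliceToString to skip the final copy.
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	sum := sha256.Sum256([]byte("hello"))
	viaFmt := fmt.Sprintf("%x", sum)     // convenient, but allocates more
	viaHex := hex.EncodeToString(sum[:]) // same output, straight-line encoding
	fmt.Println(viaFmt == viaHex)        // true
}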
- if !bytes.HasPrefix(certPemBlock, []byte("-----BEGIN")) { + if !bytes.HasPrefix(certPemBlock, beginPrefix) { pemBlock := &pem.Block{ Type: "CERTIFICATE", diff --git a/vendor/github.com/open-policy-agent/opa/topdown/doc.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/doc.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/topdown/doc.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/doc.go diff --git a/vendor/github.com/open-policy-agent/opa/topdown/encoding.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/encoding.go similarity index 95% rename from vendor/github.com/open-policy-agent/opa/topdown/encoding.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/encoding.go index f3475a60d0f..a27a9c24506 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/encoding.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/encoding.go @@ -15,9 +15,9 @@ import ( "sigs.k8s.io/yaml" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" - "github.com/open-policy-agent/opa/util" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/util" ) func builtinJSONMarshal(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -144,10 +144,10 @@ func builtinJSONIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.T str, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } - return iter(ast.BooleanTerm(json.Valid([]byte(str)))) + return iter(ast.InternedBooleanTerm(json.Valid([]byte(str)))) } func builtinBase64Encode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -175,11 +175,11 @@ func builtinBase64Decode(_ BuiltinContext, operands []*ast.Term, iter func(*ast. 
func builtinBase64IsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { str, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } _, err = base64.StdEncoding.DecodeString(string(str)) - return iter(ast.BooleanTerm(err == nil)) + return iter(ast.InternedBooleanTerm(err == nil)) } func builtinBase64UrlEncode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -355,12 +355,12 @@ func builtinYAMLUnmarshal(_ BuiltinContext, operands []*ast.Term, iter func(*ast func builtinYAMLIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { str, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } var x interface{} err = yaml.Unmarshal([]byte(str), &x) - return iter(ast.BooleanTerm(err == nil)) + return iter(ast.InternedBooleanTerm(err == nil)) } func builtinHexEncode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/errors.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/errors.go similarity index 98% rename from vendor/github.com/open-policy-agent/opa/topdown/errors.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/errors.go index 918df6c853a..cadd163198c 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/errors.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/errors.go @@ -8,7 +8,7 @@ import ( "errors" "fmt" - "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/v1/ast" ) // Halt is a special error type that built-in function implementations return to indicate diff --git a/vendor/github.com/open-policy-agent/opa/topdown/eval.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/eval.go similarity index 85% rename from vendor/github.com/open-policy-agent/opa/topdown/eval.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/eval.go index 7884ac01e02..221b29d0059 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/eval.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/eval.go @@ -5,19 +5,21 @@ import ( "errors" "fmt" "io" - "sort" + "slices" "strconv" "strings" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/topdown/builtins" - "github.com/open-policy-agent/opa/topdown/cache" - "github.com/open-policy-agent/opa/topdown/copypropagation" - "github.com/open-policy-agent/opa/topdown/print" - "github.com/open-policy-agent/opa/tracing" - "github.com/open-policy-agent/opa/types" + "sync" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/cache" + "github.com/open-policy-agent/opa/v1/topdown/copypropagation" + "github.com/open-policy-agent/opa/v1/topdown/print" + "github.com/open-policy-agent/opa/v1/tracing" + "github.com/open-policy-agent/opa/v1/types" + "github.com/open-policy-agent/opa/v1/util" ) type evalIterator func(*eval) error @@ -57,60 +59,91 @@ func (ee deferredEarlyExitError) Error() string { return fmt.Sprintf("%v: deferred early exit", ee.e.query) } +// Note(æ): this struct is formatted for optimal alignment as it is big, 
internal and instantiated +// *very* frequently during evaluation. If you need to add fields here, please consider the alignment +// of the struct, and use something like betteralign (https://github.com/dkorunic/betteralign) if you +// need help with that. type eval struct { ctx context.Context metrics metrics.Metrics seed io.Reader + cancel Cancel + queryCompiler ast.QueryCompiler + store storage.Store + txn storage.Transaction + virtualCache VirtualCache + baseCache BaseCache + interQueryBuiltinCache cache.InterQueryCache + interQueryBuiltinValueCache cache.InterQueryValueCache + printHook print.Hook time *ast.Term - queryID uint64 queryIDFact *queryIDFactory parent *eval caller *eval - cancel Cancel - query ast.Body - queryCompiler ast.QueryCompiler - index int - indexing bool - earlyExit bool bindings *bindings - store storage.Store - baseCache *baseCache - txn storage.Transaction compiler *ast.Compiler input *ast.Term data *ast.Term external *resolverTrie targetStack *refStack - tracers []QueryTracer - traceEnabled bool traceLastLocation *ast.Location // Last location of a trace event. - plugTraceVars bool instr *Instrumentation builtins map[string]*Builtin builtinCache builtins.Cache ndBuiltinCache builtins.NDBCache functionMocks *functionMocksStack - virtualCache VirtualCache comprehensionCache *comprehensionCache - interQueryBuiltinCache cache.InterQueryCache - interQueryBuiltinValueCache cache.InterQueryValueCache saveSet *saveSet saveStack *saveStack saveSupport *saveSupport saveNamespace *ast.Term - skipSaveNamespace bool inliningControl *inliningControl - genvarprefix string - genvarid int runtime *ast.Term builtinErrors *builtinErrors - printHook print.Hook + roundTripper CustomizeRoundTripper + genvarprefix string + query ast.Body + tracers []QueryTracer tracingOpts tracing.Options + queryID uint64 + index int + genvarid int + indexing bool + earlyExit bool + traceEnabled bool + plugTraceVars bool + skipSaveNamespace bool findOne bool strictObjects bool + defined bool +} + +type evp struct { + pool sync.Pool +} + +func (ep *evp) Put(e *eval) { + ep.pool.Put(e) +} + +func (ep *evp) Get() *eval { + return ep.pool.Get().(*eval) +} + +var evalPool = evp{ + pool: sync.Pool{ + New: func() any { + return &eval{} + }, + }, } func (e *eval) Run(iter evalIterator) error { + if !e.traceEnabled { + // avoid function literal escaping to heap if we don't need the trace + return e.eval(iter) + } + e.traceEnter(e.query) return e.eval(func(e *eval) error { e.traceExit(e.query) @@ -129,10 +162,10 @@ func (e *eval) String() string { func (e *eval) string(s *strings.Builder) { fmt.Fprintf(s, "') + s.WriteByte('>') } func (e *eval) builtinFunc(name string) (*ast.Builtin, BuiltinFunc, bool) { @@ -151,25 +184,23 @@ func (e *eval) builtinFunc(name string) (*ast.Builtin, BuiltinFunc, bool) { return nil, nil, false } -func (e *eval) closure(query ast.Body) *eval { - cpy := *e +func (e *eval) closure(query ast.Body, cpy *eval) { + *cpy = *e cpy.index = 0 cpy.query = query cpy.queryID = cpy.queryIDFact.Next() cpy.parent = e cpy.findOne = false - return &cpy } -func (e *eval) child(query ast.Body) *eval { - cpy := *e +func (e *eval) child(query ast.Body, cpy *eval) { + *cpy = *e cpy.index = 0 cpy.query = query cpy.queryID = cpy.queryIDFact.Next() cpy.bindings = newBindings(cpy.queryID, e.instr) cpy.parent = e cpy.findOne = false - return &cpy } func (e *eval) next(iter evalIterator) error { @@ -335,6 +366,13 @@ func (e *eval) evalExpr(iter evalIterator) error { } if e.cancel != nil && 
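The Note(æ) comment above asks contributors to mind struct alignment. As a rough illustration of why field ordering matters for a struct instantiated this often, assuming a 64-bit platform and stand-in fields rather than OPA's actual eval struct:

package main

import (
	"fmt"
	"unsafe"
)

// padded interleaves small and large fields, so the compiler inserts padding.
type padded struct {
	a bool  // 1 byte + 7 bytes padding before b
	b int64 // 8 bytes
	c bool  // 1 byte + 7 bytes trailing padding
}

// packed puts the large field first and groups the bools together.
type packed struct {
	b int64 // 8 bytes
	a bool  // the bools share the final word
	c bool
}

func main() {
	fmt.Println(unsafe.Sizeof(padded{}), unsafe.Sizeof(packed{})) // 24 16
}

Tools like betteralign, linked in the comment, automate this reordering.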
e.cancel.Cancelled() { + if e.ctx != nil && e.ctx.Err() != nil { + return &Error{ + Code: CancelErr, + Message: e.ctx.Err().Error(), + err: e.ctx.Err(), + } + } return &Error{ Code: CancelErr, Message: "caller cancelled query execution", @@ -342,13 +380,9 @@ func (e *eval) evalExpr(iter evalIterator) error { } if e.index >= len(e.query) { - err := iter(e) - - if err != nil { + if err := iter(e); err != nil { switch err := err.(type) { - case *deferredEarlyExitError: - return wrapErr(err) - case *earlyExitError: + case *deferredEarlyExitError, *earlyExitError: return wrapErr(err) default: return err @@ -374,46 +408,110 @@ func (e *eval) evalExpr(iter evalIterator) error { } func (e *eval) evalStep(iter evalIterator) error { - expr := e.query[e.index] if expr.Negated { return e.evalNot(iter) } - var defined bool var err error + + // NOTE(æ): the reason why there's one branch for the tracing case and one almost + // identical branch below for when tracing is disabled is that the tracing case + // allocates wildly. These allocations are cause by the "defined" boolean variable + // escaping to the heap as its value is set from inside of closures. There may very + // well be more elegant solutions to this problem, but this is one that works, and + // saves several *million* allocations for some workloads. So feel free to refactor + // this, but do make sure that the common non-tracing case doesn't pay in allocations + // for something that is only needed when tracing is enabled. + if e.traceEnabled { + var defined bool + switch terms := expr.Terms.(type) { + case []*ast.Term: + switch { + case expr.IsEquality(): + err = e.unify(terms[1], terms[2], func() error { + defined = true + err := iter(e) + e.traceRedo(expr) + return err + }) + default: + err = e.evalCall(terms, func() error { + defined = true + err := iter(e) + e.traceRedo(expr) + return err + }) + } + case *ast.Term: + // generateVar inlined here to avoid extra allocations in hot path + rterm := ast.VarTerm(e.fmtVarTerm()) + err = e.unify(terms, rterm, func() error { + if e.saveSet.Contains(rterm, e.bindings) { + return e.saveExpr(ast.NewExpr(rterm), e.bindings, func() error { + return iter(e) + }) + } + if !e.bindings.Plug(rterm).Equal(ast.InternedBooleanTerm(false)) { + defined = true + err := iter(e) + e.traceRedo(expr) + return err + } + return nil + }) + case *ast.Every: + eval := evalEvery{ + Every: terms, + e: e, + expr: expr, + } + err = eval.eval(func() error { + defined = true + err := iter(e) + e.traceRedo(expr) + return err + }) + + default: // guard-rail for adding extra (Expr).Terms types + return fmt.Errorf("got %T terms: %[1]v", terms) + } + + if err != nil { + return err + } + + if !defined { + e.traceFail(expr) + } + + return nil + } + switch terms := expr.Terms.(type) { case []*ast.Term: switch { case expr.IsEquality(): err = e.unify(terms[1], terms[2], func() error { - defined = true - err := iter(e) - e.traceRedo(expr) - return err + return iter(e) }) default: err = e.evalCall(terms, func() error { - defined = true - err := iter(e) - e.traceRedo(expr) - return err + return iter(e) }) } case *ast.Term: - rterm := e.generateVar(fmt.Sprintf("term_%d_%d", e.queryID, e.index)) + // generateVar inlined here to avoid extra allocations in hot path + rterm := ast.VarTerm(e.fmtVarTerm()) err = e.unify(terms, rterm, func() error { if e.saveSet.Contains(rterm, e.bindings) { return e.saveExpr(ast.NewExpr(rterm), e.bindings, func() error { return iter(e) }) } - if !e.bindings.Plug(rterm).Equal(ast.BooleanTerm(false)) { - defined 
= true - err := iter(e) - e.traceRedo(expr) - return err + if !e.bindings.Plug(rterm).Equal(ast.InternedBooleanTerm(false)) { + return iter(e) } return nil }) @@ -424,25 +522,28 @@ func (e *eval) evalStep(iter evalIterator) error { expr: expr, } err = eval.eval(func() error { - defined = true - err := iter(e) - e.traceRedo(expr) - return err + return iter(e) }) default: // guard-rail for adding extra (Expr).Terms types return fmt.Errorf("got %T terms: %[1]v", terms) } - if err != nil { - return err - } + return err +} - if !defined { - e.traceFail(expr) - } +// Single-purpose fmt.Sprintf replacement for generating variable names with only +// one allocation performed instead of 4, and in 1/3 the time. +func (e *eval) fmtVarTerm() string { + buf := make([]byte, 0, len(e.genvarprefix)+util.NumDigitsUint(e.queryID)+util.NumDigitsInt(e.index)+7) - return nil + buf = append(buf, e.genvarprefix...) + buf = append(buf, "_term_"...) + buf = strconv.AppendUint(buf, e.queryID, 10) + buf = append(buf, '_') + buf = strconv.AppendInt(buf, int64(e.index), 10) + + return util.ByteSliceToString(buf) } func (e *eval) evalNot(iter evalIterator) error { @@ -453,27 +554,34 @@ func (e *eval) evalNot(iter evalIterator) error { return e.evalNotPartial(iter) } - negation := ast.NewBody(expr.Complement().NoWith()) - child := e.closure(negation) + negation := ast.NewBody(expr.ComplementNoWith()) + child := evalPool.Get() + defer evalPool.Put(child) - var defined bool - child.traceEnter(negation) + e.closure(negation, child) - err := child.eval(func(*eval) error { - child.traceExit(negation) - defined = true - child.traceRedo(negation) - return nil - }) + if e.traceEnabled { + child.traceEnter(negation) + } - if err != nil { + if err := child.eval(func(*eval) error { + if e.traceEnabled { + child.traceExit(negation) + child.traceRedo(negation) + } + child.defined = true + + return nil + }); err != nil { return err } - if !defined { + if !child.defined { return iter(e) } + child.defined = false + e.traceFail(expr) return nil } @@ -482,16 +590,18 @@ func (e *eval) evalWith(iter evalIterator) error { expr := e.query[e.index] - // Disable inlining on all references in the expression so the result of - // partial evaluation has the same semantics w/ the with statements - // preserved. var disable []ast.Ref - disableRef := func(x ast.Ref) bool { - disable = append(disable, x.GroundPrefix()) - return false - } if e.partial() { + // Avoid the `disable` var to escape to heap unless partial evaluation is enabled. + var disablePartial []ast.Ref + // Disable inlining on all references in the expression so the result of + // partial evaluation has the same semantics w/ the with statements + // preserved. + disableRef := func(x ast.Ref) bool { + disablePartial = append(disablePartial, x.GroundPrefix()) + return false + } // If the value is unknown the with statement cannot be evaluated and so // the entire expression should be saved to be safe. In the future this @@ -516,12 +626,15 @@ func (e *eval) evalWith(iter evalIterator) error { } ast.WalkRefs(expr.NoWith(), disableRef) + + disable = disablePartial } pairsInput := [][2]*ast.Term{} pairsData := [][2]*ast.Term{} - functionMocks := [][2]*ast.Term{} - targets := []ast.Ref{} + targets := make([]ast.Ref, 0, len(expr.With)) + + var functionMocks [][2]*ast.Term for i := range expr.With { target := expr.With[i].Target @@ -613,11 +726,14 @@ func (e *eval) evalWithPop(input, data *ast.Term) { } func (e *eval) evalNotPartial(iter evalIterator) error { - // Prepare query normally. 
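fmtVarTerm above is a single-purpose replacement for building genvarprefix + "_term_" + queryID + "_" + index via fmt.Sprintf, writing instead into one pre-sized byte slice. A runnable sketch of the same pattern; the exact capacity arithmetic (util.NumDigitsUint and friends) and the zero-copy util.ByteSliceToString conversion are approximated here with a generous size guess and a plain string conversion:

package main

import (
	"fmt"
	"strconv"
)

func varName(prefix string, queryID uint64, index int) string {
	// Pre-size generously (20 digits covers any uint64) so append never grows the slice.
	buf := make([]byte, 0, len(prefix)+len("_term_")+20+1+20)
	buf = append(buf, prefix...)
	buf = append(buf, "_term_"...)
	buf = strconv.AppendUint(buf, queryID, 10)
	buf = append(buf, '_')
	buf = strconv.AppendInt(buf, int64(index), 10)
	return string(buf) // one copy here; util.ByteSliceToString avoids even that
}

func main() {
	fmt.Println(varName("__local", 42, 3)) // __local_term_42_3
}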
expr := e.query[e.index] - negation := expr.Complement().NoWith() - child := e.closure(ast.NewBody(negation)) + negation := expr.ComplementNoWith() + + child := evalPool.Get() + defer evalPool.Put(child) + + e.closure(ast.NewBody(negation), child) // Unknowns is the set of variables that are marked as unknown. The variables // are namespaced with the query ID that they originate in. This ensures that @@ -710,9 +826,7 @@ func (e *eval) evalNotPartialSupport(negationID uint64, expr *ast.Expr, unknowns args = append(args, ast.NewTerm(v)) } - sort.Slice(args, func(i, j int) bool { - return args[i].Value.Compare(args[j].Value) < 0 - }) + slices.SortFunc(args, ast.TermValueCompare) if len(args) > 0 { head.Args = args @@ -747,7 +861,6 @@ func (e *eval) evalCall(terms []*ast.Term, iter unifyIterator) error { ref := terms[0].Value.(ast.Ref) - var mocked bool mock, mocked := e.functionMocks.Get(ref) if mocked { if m, ok := mock.Value.(ast.Ref); ok && isFunction(e.compiler.TypeEnv, m) { // builtin or data function @@ -770,7 +883,7 @@ func (e *eval) evalCall(terms []*ast.Term, iter unifyIterator) error { if ref[0].Equal(ast.DefaultRootDocument) { if mocked { f := e.compiler.TypeEnv.Get(ref).(*types.Function) - return e.evalCallValue(len(f.FuncArgs().Args), terms, mock, iter) + return e.evalCallValue(f.Arity(), terms, mock, iter) } var ir *ast.IndexResult @@ -800,11 +913,11 @@ func (e *eval) evalCall(terms []*ast.Term, iter unifyIterator) error { } if mocked { // value replacement of built-in call - return e.evalCallValue(len(bi.Decl.Args()), terms, mock, iter) + return e.evalCallValue(bi.Decl.Arity(), terms, mock, iter) } if e.unknown(e.query[e.index], e.bindings) { - return e.saveCall(len(bi.Decl.Args()), terms, iter) + return e.saveCall(bi.Decl.Arity(), terms, iter) } var parentID uint64 @@ -836,6 +949,7 @@ func (e *eval) evalCall(terms []*ast.Term, iter unifyIterator) error { PrintHook: e.printHook, DistributedTracingOpts: e.tracingOpts, Capabilities: capabilities, + RoundTripper: e.roundTripper, } eval := evalBuiltin{ @@ -855,7 +969,7 @@ func (e *eval) evalCallValue(arity int, terms []*ast.Term, mock *ast.Term, iter return e.unify(terms[len(terms)-1], mock, iter) case len(terms) == arity+1: - if mock.Value.Compare(ast.Boolean(false)) != 0 { + if !ast.Boolean(false).Equal(mock.Value) { return iter() } return nil @@ -932,6 +1046,22 @@ func (e *eval) biunifyArraysRec(a, b *ast.Array, b1, b2 *bindings, iter unifyIte }) } +func (e *eval) biunifyTerms(a, b []*ast.Term, b1, b2 *bindings, iter unifyIterator) error { + if len(a) != len(b) { + return nil + } + return e.biunifyTermsRec(a, b, b1, b2, iter, 0) +} + +func (e *eval) biunifyTermsRec(a, b []*ast.Term, b1, b2 *bindings, iter unifyIterator, idx int) error { + if idx == len(a) { + return iter() + } + return e.biunify(a[idx], b[idx], b1, b2, func() error { + return e.biunifyTermsRec(a, b, b1, b2, iter, idx+1) + }) +} + func (e *eval) biunifyObjects(a, b ast.Object, b1, b2 *bindings, iter unifyIterator) error { if a.Len() != b.Len() { return nil @@ -1057,7 +1187,7 @@ func (e *eval) biunifyRef(a, b *ast.Term, b1, b2 *bindings, iter unifyIterator) e: e, ref: ref, pos: 1, - plugged: ref.Copy(), + plugged: ref.CopyNonGround(), bindings: b1, rterm: b, rbindings: b2, @@ -1165,7 +1295,10 @@ func (e *eval) buildComprehensionCache(a *ast.Term) (*ast.Term, error) { } func (e *eval) buildComprehensionCacheArray(x *ast.ArrayComprehension, keys []*ast.Term) (*comprehensionCacheElem, error) { - child := e.child(x.Body) + child := evalPool.Get() + defer 
evalPool.Put(child) + + e.child(x.Body, child) node := newComprehensionCacheElem() return node, child.Run(func(child *eval) error { values := make([]*ast.Term, len(keys)) @@ -1184,7 +1317,10 @@ func (e *eval) buildComprehensionCacheArray(x *ast.ArrayComprehension, keys []*a } func (e *eval) buildComprehensionCacheSet(x *ast.SetComprehension, keys []*ast.Term) (*comprehensionCacheElem, error) { - child := e.child(x.Body) + child := evalPool.Get() + defer evalPool.Put(child) + + e.child(x.Body, child) node := newComprehensionCacheElem() return node, child.Run(func(child *eval) error { values := make([]*ast.Term, len(keys)) @@ -1204,7 +1340,10 @@ func (e *eval) buildComprehensionCacheSet(x *ast.SetComprehension, keys []*ast.T } func (e *eval) buildComprehensionCacheObject(x *ast.ObjectComprehension, keys []*ast.Term) (*comprehensionCacheElem, error) { - child := e.child(x.Body) + child := evalPool.Get() + defer evalPool.Put(child) + + e.child(x.Body, child) node := newComprehensionCacheElem() return node, child.Run(func(child *eval) error { values := make([]*ast.Term, len(keys)) @@ -1285,7 +1424,11 @@ func (e *eval) amendComprehension(a *ast.Term, b1 *bindings) (*ast.Term, error) func (e *eval) biunifyComprehensionArray(x *ast.ArrayComprehension, b *ast.Term, b1, b2 *bindings, iter unifyIterator) error { result := ast.NewArray() - child := e.closure(x.Body) + child := evalPool.Get() + + e.closure(x.Body, child) + defer evalPool.Put(child) + err := child.Run(func(child *eval) error { result = result.Append(child.bindings.Plug(x.Term)) return nil @@ -1298,7 +1441,11 @@ func (e *eval) biunifyComprehensionArray(x *ast.ArrayComprehension, b *ast.Term, func (e *eval) biunifyComprehensionSet(x *ast.SetComprehension, b *ast.Term, b1, b2 *bindings, iter unifyIterator) error { result := ast.NewSet() - child := e.closure(x.Body) + child := evalPool.Get() + + e.closure(x.Body, child) + defer evalPool.Put(child) + err := child.Run(func(child *eval) error { result.Add(child.bindings.Plug(x.Term)) return nil @@ -1310,8 +1457,13 @@ func (e *eval) biunifyComprehensionSet(x *ast.SetComprehension, b *ast.Term, b1, } func (e *eval) biunifyComprehensionObject(x *ast.ObjectComprehension, b *ast.Term, b1, b2 *bindings, iter unifyIterator) error { + child := evalPool.Get() + defer evalPool.Put(child) + + e.closure(x.Body, child) + result := ast.NewObject() - child := e.closure(x.Body) + err := child.Run(func(child *eval) error { key := child.bindings.Plug(x.Key) value := child.bindings.Plug(x.Value) @@ -1354,7 +1506,7 @@ func (e *eval) saveExprMarkUnknowns(expr *ast.Expr, b *bindings, iter unifyItera e.traceSave(expr) err = iter() e.saveStack.Pop() - for i := 0; i < pops; i++ { + for range pops { e.saveSet.Pop() } return err @@ -1384,7 +1536,7 @@ func (e *eval) saveUnify(a, b *ast.Term, b1, b2 *bindings, iter unifyIterator) e err := iter() e.saveStack.Pop() - for i := 0; i < pops; i++ { + for range pops { e.saveSet.Pop() } @@ -1411,7 +1563,7 @@ func (e *eval) saveCall(declArgsLen int, terms []*ast.Term, iter unifyIterator) err := iter() e.saveStack.Pop() - for i := 0; i < pops; i++ { + for range pops { e.saveSet.Pop() } return err @@ -1433,7 +1585,7 @@ func (e *eval) saveInlinedNegatedExprs(exprs []*ast.Expr, iter unifyIterator) er e.traceSave(expr) } err := iter() - for i := 0; i < len(exprs); i++ { + for range exprs { e.saveStack.Pop() } return err @@ -1448,12 +1600,22 @@ func (e *eval) getRules(ref ast.Ref, args []*ast.Term) (*ast.IndexResult, error) return nil, nil } + resolver := 
resolverPool.Get().(*evalResolver) + defer func() { + resolver.e = nil + resolver.args = nil + resolverPool.Put(resolver) + }() + var result *ast.IndexResult var err error if e.indexing { - result, err = index.Lookup(&evalResolver{e: e, args: args}) + resolver.e = e + resolver.args = args + result, err = index.Lookup(resolver) } else { - result, err = index.AllRules(&evalResolver{e: e}) + resolver.e = e + result, err = index.AllRules(resolver) } if err != nil { return nil, err @@ -1461,20 +1623,27 @@ func (e *eval) getRules(ref ast.Ref, args []*ast.Term) (*ast.IndexResult, error) result.EarlyExit = result.EarlyExit && e.earlyExit - var msg strings.Builder - if len(result.Rules) == 1 { - msg.WriteString("(matched 1 rule") - } else { - msg.Grow(len("(matched NNNN rules)")) - msg.WriteString("(matched ") - msg.WriteString(strconv.Itoa(len(result.Rules))) - msg.WriteString(" rules") - } - if result.EarlyExit { - msg.WriteString(", early exit") + if e.traceEnabled { + var msg strings.Builder + if len(result.Rules) == 1 { + msg.WriteString("(matched 1 rule") + } else { + msg.Grow(len("(matched NNNN rules)")) + msg.WriteString("(matched ") + msg.WriteString(strconv.Itoa(len(result.Rules))) + msg.WriteString(" rules") + } + if result.EarlyExit { + msg.WriteString(", early exit") + } + msg.WriteRune(')') + + // Copy ref here as ref otherwise always escapes to the heap, + // whether tracing is enabled or not. + r := ref.Copy() + e.traceIndex(e.query[e.index], msg.String(), &r) } - msg.WriteRune(')') - e.traceIndex(e.query[e.index], msg.String(), &ref) + return result, err } @@ -1487,10 +1656,20 @@ type evalResolver struct { args []*ast.Term } +var ( + resolverPool = sync.Pool{ + New: func() any { + return &evalResolver{} + }, + } +) + func (e *evalResolver) Resolve(ref ast.Ref) (ast.Value, error) { e.e.instr.startTimer(evalOpResolve) - if e.e.inliningControl.Disabled(ref, true) || e.e.saveSet.Contains(ast.NewTerm(ref), nil) { + // NOTE(ae): nil check on saveSet to avoid ast.NewTerm allocation when not needed + if e.e.inliningControl.Disabled(ref, true) || (e.e.saveSet != nil && + e.e.saveSet.Contains(ast.NewTerm(ref), nil)) { e.e.instr.stopTimer(evalOpResolve) return nil, ast.UnknownValueErr{} } @@ -1568,7 +1747,7 @@ func (e *evalResolver) Resolve(ref ast.Ref) (ast.Value, error) { return merged, err } e.e.instr.stopTimer(evalOpResolve) - return nil, fmt.Errorf("illegal ref") + return nil, errors.New("illegal ref") } func (e *eval) resolveReadFromStorage(ref ast.Ref, a ast.Value) (ast.Value, error) { @@ -1611,16 +1790,7 @@ func (e *eval) resolveReadFromStorage(ref ast.Ref, a ast.Value) (ast.Value, erro } case ast.Object: if obj.Len() > 0 { - cpy := ast.NewObject() - if err := obj.Iter(func(k *ast.Term, v *ast.Term) error { - if !ast.SystemDocumentKey.Equal(k.Value) { - cpy.Insert(k, v) - } - return nil - }); err != nil { - return nil, err - } - blob = cpy + blob, _ = obj.Map(systemDocumentKeyRemoveMapper) } } } @@ -1653,8 +1823,21 @@ func (e *eval) resolveReadFromStorage(ref ast.Ref, a ast.Value) (ast.Value, erro return merged, nil } +func systemDocumentKeyRemoveMapper(k, v *ast.Term) (*ast.Term, *ast.Term, error) { + if ast.SystemDocumentKey.Equal(k.Value) { + return nil, nil, nil + } + return k, v, nil +} + func (e *eval) generateVar(suffix string) *ast.Term { - return ast.VarTerm(fmt.Sprintf("%v_%v", e.genvarprefix, suffix)) + buf := make([]byte, 0, len(e.genvarprefix)+len(suffix)+1) + + buf = append(buf, e.genvarprefix...) + buf = append(buf, '_') + buf = append(buf, suffix...) 
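Both evalPool and resolverPool above apply the same idea: short-lived, frequently created structs are taken from a sync.Pool, filled in place, and returned, instead of being heap-allocated per call. A stripped-down sketch with a stand-in eval type:

package main

import (
	"fmt"
	"sync"
)

type eval struct {
	query string
	index int
}

var evalPool = sync.Pool{New: func() any { return &eval{} }}

// closure fills a caller-supplied (pooled) child instead of returning &cpy,
// which is what keeps each child from escaping to the heap.
func (e *eval) closure(query string, cpy *eval) {
	*cpy = *e
	cpy.index = 0
	cpy.query = query
}

func main() {
	parent := &eval{query: "p", index: 3}

	child := evalPool.Get().(*eval)
	defer evalPool.Put(child)

	parent.closure("q", child)
	fmt.Println(child.query, child.index) // q 0
}

As with resolverPool's deferred cleanup above, pooled values should be zeroed or fully overwritten before reuse so stale references do not leak between queries.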
+ + return ast.VarTerm(util.ByteSliceToString(buf)) } func (e *eval) rewrittenVar(v ast.Var) (ast.Var, bool) { @@ -1681,7 +1864,7 @@ func (e *eval) getDeclArgsLen(x *ast.Expr) (int, error) { bi, _, ok := e.builtinFunc(operator.String()) if ok { - return len(bi.Decl.Args()), nil + return bi.Decl.Arity(), nil } ir, err := e.getRules(operator, nil) @@ -1716,7 +1899,7 @@ func (e *evalBuiltin) canUseNDBCache(bi *ast.Builtin) bool { return bi.Nondeterministic && e.bctx.NDBuiltinCache != nil } -func (e evalBuiltin) eval(iter unifyIterator) error { +func (e *evalBuiltin) eval(iter unifyIterator) error { operands := make([]*ast.Term, len(e.terms)) @@ -1724,10 +1907,9 @@ func (e evalBuiltin) eval(iter unifyIterator) error { operands[i] = e.e.bindings.Plug(e.terms[i]) } - numDeclArgs := len(e.bi.Decl.FuncArgs().Args) + numDeclArgs := e.bi.Decl.Arity() e.e.instr.startTimer(evalOpBuiltinCall) - var err error // NOTE(philipc): We sometimes have to drop the very last term off // the args list for cases where a builtin's result is used/assigned, @@ -1749,7 +1931,7 @@ func (e evalBuiltin) eval(iter unifyIterator) error { case e.bi.Decl.Result() == nil: return iter() case len(operands) == numDeclArgs: - if v.Compare(ast.Boolean(false)) == 0 { + if ast.Boolean(false).Equal(v) { return nil // nothing to do } return iter() @@ -1763,7 +1945,7 @@ func (e evalBuiltin) eval(iter unifyIterator) error { } // Normal unification flow for builtins: - err = e.f(e.bctx, operands, func(output *ast.Term) error { + err := e.f(e.bctx, operands, func(output *ast.Term) error { e.e.instr.stopTimer(evalOpBuiltinCall) @@ -1773,7 +1955,7 @@ func (e evalBuiltin) eval(iter unifyIterator) error { case e.bi.Decl.Result() == nil: err = iter() case len(operands) == numDeclArgs: - if output.Value.Compare(ast.Boolean(false)) != 0 { + if !ast.Boolean(false).Equal(output.Value) { err = iter() } // else: nothing to do, don't iter() default: @@ -1813,9 +1995,9 @@ func (e evalBuiltin) eval(iter unifyIterator) error { type evalFunc struct { e *eval + ir *ast.IndexResult ref ast.Ref terms []*ast.Term - ir *ast.IndexResult } func (e evalFunc) eval(iter unifyIterator) error { @@ -1837,15 +2019,36 @@ func (e evalFunc) eval(iter unifyIterator) error { return e.e.saveCall(argCount, e.terms, iter) } - if e.e.partial() && (e.e.inliningControl.shallow || e.e.inliningControl.Disabled(e.ref, false)) { - // check if the function definitions, or any of the arguments - // contain something unknown - unknown := e.e.unknown(e.ref, e.e.bindings) - for i := 1; !unknown && i <= argCount; i++ { - unknown = e.e.unknown(e.terms[i], e.e.bindings) + if e.e.partial() { + var mustGenerateSupport bool + + if defRule := e.ir.Default; defRule != nil { + // The presence of a default func might force us to generate support + if len(defRule.Head.Args) == len(e.terms)-1 { + // The function is called without collecting the result in an output term, + // therefore any successful evaluation of the function is of interest, including the default value ... + if ret := defRule.Head.Value; ret == nil || !ret.Equal(ast.InternedBooleanTerm(false)) { + // ... unless the default value is false, + mustGenerateSupport = true + } + } else { + // The function is called with an output term, therefore any successful evaluation of the function is of interest. + // NOTE: Because of how the compiler rewrites function calls, we can't know if the result value is compared + // to a constant value, so we can't be as clever as we are for rules. 
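One small change above turns evalBuiltin.eval's value receiver into a pointer receiver. With a value receiver, every call copies the whole struct; a pointer receiver passes eight bytes. A quick illustration with a deliberately large stand-in type:

package main

import "fmt"

type big struct {
	payload [1024]byte // forces a meaningful per-call copy with a value receiver
	calls   int
}

func (b big) byValue() { b.calls++ } // increments a copy; the caller never sees it

func (b *big) byPtr() { b.calls++ } // increments the shared value

func main() {
	var b big
	b.byValue()
	b.byPtr()
	fmt.Println(b.calls) // 1: only the pointer receiver's increment survived
}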
+ mustGenerateSupport = true + } } - if unknown { - return e.partialEvalSupport(argCount, iter) + + if mustGenerateSupport || e.e.inliningControl.shallow || e.e.inliningControl.Disabled(e.ref, false) { + // check if the function definitions, or any of the arguments + // contain something unknown + unknown := e.e.unknown(e.ref, e.e.bindings) + for i := 1; !unknown && i <= argCount; i++ { + unknown = e.e.unknown(e.terms[i], e.e.bindings) + } + if unknown { + return e.partialEvalSupport(argCount, iter) + } } } @@ -1854,9 +2057,9 @@ func (e evalFunc) eval(iter unifyIterator) error { func (e evalFunc) evalValue(iter unifyIterator, argCount int, findOne bool) error { var cacheKey ast.Ref - var hit bool - var err error if !e.e.partial() { + var hit bool + var err error cacheKey, hit, err = e.evalCache(argCount, iter) if err != nil { return err @@ -1865,12 +2068,23 @@ func (e evalFunc) evalValue(iter unifyIterator, argCount int, findOne bool) erro } } + // NOTE(anders): While it makes the code a bit more complex, reusing the + // args slice across each function increment saves a lot of resources + // compared to creating a new one inside each call to evalOneRule... so + // think twice before simplifying this :) + args := make([]*ast.Term, len(e.terms)-1) + var prev *ast.Term return withSuppressEarlyExit(func() error { var outerEe *deferredEarlyExitError for _, rule := range e.ir.Rules { - next, err := e.evalOneRule(iter, rule, cacheKey, prev, findOne) + copy(args, rule.Head.Args) + if len(args) == len(rule.Head.Args)+1 { + args[len(args)-1] = rule.Head.Value + } + + next, err := e.evalOneRule(iter, rule, args, cacheKey, prev, findOne) if err != nil { if oee, ok := err.(*deferredEarlyExitError); ok { if outerEe == nil { @@ -1882,7 +2096,12 @@ func (e evalFunc) evalValue(iter unifyIterator, argCount int, findOne bool) erro } if next == nil { for _, erule := range e.ir.Else[rule] { - next, err = e.evalOneRule(iter, erule, cacheKey, prev, findOne) + copy(args, erule.Head.Args) + if len(args) == len(erule.Head.Args)+1 { + args[len(args)-1] = erule.Head.Value + } + + next, err = e.evalOneRule(iter, erule, args, cacheKey, prev, findOne) if err != nil { if oee, ok := err.(*deferredEarlyExitError); ok { if outerEe == nil { @@ -1903,7 +2122,13 @@ func (e evalFunc) evalValue(iter unifyIterator, argCount int, findOne bool) erro } if e.ir.Default != nil && prev == nil { - _, err := e.evalOneRule(iter, e.ir.Default, cacheKey, prev, findOne) + copy(args, e.ir.Default.Head.Args) + if len(args) == len(e.ir.Default.Head.Args)+1 { + args[len(args)-1] = e.ir.Default.Head.Value + } + + _, err := e.evalOneRule(iter, e.ir.Default, args, cacheKey, prev, findOne) + return err } @@ -1922,9 +2147,15 @@ func (e evalFunc) evalCache(argCount int, iter unifyIterator) (ast.Ref, bool, er } else { plen = len(e.terms) } + cacheKey := make([]*ast.Term, plen) - for i := 0; i < plen; i++ { - cacheKey[i] = e.e.bindings.Plug(e.terms[i]) + for i := range plen { + if e.terms[i].IsGround() { + // Avoid expensive copying of ref if it is ground. 
+ cacheKey[i] = e.terms[i] + } else { + cacheKey[i] = e.e.bindings.Plug(e.terms[i]) + } } cached, _ := e.e.virtualCache.Get(cacheKey) @@ -1943,23 +2174,18 @@ func (e evalFunc) evalCache(argCount int, iter unifyIterator) (ast.Ref, bool, er return cacheKey, false, nil } -func (e evalFunc) evalOneRule(iter unifyIterator, rule *ast.Rule, cacheKey ast.Ref, prev *ast.Term, findOne bool) (*ast.Term, error) { +func (e evalFunc) evalOneRule(iter unifyIterator, rule *ast.Rule, args []*ast.Term, cacheKey ast.Ref, prev *ast.Term, findOne bool) (*ast.Term, error) { + child := evalPool.Get() + defer evalPool.Put(child) - child := e.e.child(rule.Body) + e.e.child(rule.Body, child) child.findOne = findOne - args := make([]*ast.Term, len(e.terms)-1) - copy(args, rule.Head.Args) - - if len(args) == len(rule.Head.Args)+1 { - args[len(args)-1] = rule.Head.Value - } - var result *ast.Term child.traceEnter(rule) - err := child.biunifyArrays(ast.NewArray(e.terms[1:]...), ast.NewArray(args...), e.e.bindings, child.bindings, func() error { + err := child.biunifyTerms(e.terms[1:], args, e.e.bindings, child.bindings, func() error { return child.eval(func(child *eval) error { child.traceExit(rule) @@ -1976,28 +2202,24 @@ func (e evalFunc) evalOneRule(iter unifyIterator, rule *ast.Rule, cacheKey ast.R e.e.virtualCache.Put(cacheKey, result) // the redos confirm this, or the evaluation is aborted } - if len(rule.Head.Args) == len(e.terms)-1 { - if result.Value.Compare(ast.Boolean(false)) == 0 { - if prev != nil && ast.Compare(prev, result) != 0 { - return functionConflictErr(rule.Location) - } - prev = result - return nil + if len(rule.Head.Args) == len(e.terms)-1 && ast.Boolean(false).Equal(result.Value) { + if prev != nil && !prev.Equal(result) { + return functionConflictErr(rule.Location) } + prev = result + return nil } // Partial evaluation should explore all rules and may not produce // a ground result so we do not perform conflict detection or // deduplication. See "ignore conflicts: functions" test case for // an example. 
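The NOTE(anders) comment above explains why the args slice is now allocated once in evalValue and refilled before each evalOneRule call. A toy version of that reuse pattern, with strings standing in for *ast.Term:

package main

import "fmt"

func main() {
	ruleArgs := [][]string{{"a", "b"}, {"c", "d"}, {"e", "f"}}

	args := make([]string, 2) // sized once, outside the loop
	for _, head := range ruleArgs {
		copy(args, head) // overwrite in place; no per-rule allocation
		fmt.Println(args)
	}
}

The trade-off the comment warns about is real: a shared slice means every call site must fully overwrite it before use, which is why each branch above copies first.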
- if !e.e.partial() { - if prev != nil { - if ast.Compare(prev, result) != 0 { - return functionConflictErr(rule.Location) - } - child.traceRedo(rule) - return nil + if !e.e.partial() && prev != nil { + if !prev.Equal(result) { + return functionConflictErr(rule.Location) } + child.traceRedo(rule) + return nil } prev = result @@ -2017,7 +2239,6 @@ func (e evalFunc) evalOneRule(iter unifyIterator, rule *ast.Rule, cacheKey ast.R func (e evalFunc) partialEvalSupport(declArgsLen int, iter unifyIterator) error { path := e.e.namespaceRef(e.ref) - term := ast.NewTerm(path) if !e.e.saveSupport.Exists(path) { for _, rule := range e.ir.Rules { @@ -2026,18 +2247,29 @@ func (e evalFunc) partialEvalSupport(declArgsLen int, iter unifyIterator) error return err } } + + if e.ir.Default != nil { + err := e.partialEvalSupportRule(e.ir.Default, path) + if err != nil { + return err + } + } } if !e.e.saveSupport.Exists(path) { // we haven't saved anything, nothing to call return nil } + term := ast.NewTerm(path) + return e.e.saveCall(declArgsLen, append([]*ast.Term{term}, e.terms[1:]...), iter) } func (e evalFunc) partialEvalSupportRule(rule *ast.Rule, path ast.Ref) error { + child := evalPool.Get() + defer evalPool.Put(child) - child := e.e.child(rule.Body) + e.e.child(rule.Body, child) child.traceEnter(rule) e.e.saveStack.PushQuery(nil) @@ -2070,8 +2302,9 @@ func (e evalFunc) partialEvalSupportRule(rule *ast.Rule, path ast.Ref) error { } e.e.saveSupport.Insert(path, &ast.Rule{ - Head: head, - Body: plugged, + Head: head, + Body: plugged, + Default: rule.Default, }) } child.traceRedo(rule) @@ -2084,15 +2317,48 @@ func (e evalFunc) partialEvalSupportRule(rule *ast.Rule, path ast.Ref) error { return err } +type deferredEarlyExitContainer struct { + deferred *deferredEarlyExitError +} + +func (dc *deferredEarlyExitContainer) handleErr(err error) error { + if err == nil { + return nil + } + + if dc.deferred == nil && errors.As(err, &dc.deferred) && dc.deferred != nil { + return nil + } + + return err +} + +// copyError returns a copy of the deferred early exit error if one is present. +// This exists only to allow the container to be reused. +func (dc *deferredEarlyExitContainer) copyError() *deferredEarlyExitError { + if dc.deferred == nil { + return nil + } + + cpy := *dc.deferred + return &cpy +} + +var deecPool = sync.Pool{ + New: func() any { + return &deferredEarlyExitContainer{} + }, +} + type evalTree struct { e *eval - ref ast.Ref - plugged ast.Ref - pos int bindings *bindings rterm *ast.Term rbindings *bindings node *ast.TreeNode + ref ast.Ref + plugged ast.Ref + pos int } func (e evalTree) eval(iter unifyIterator) error { @@ -2115,9 +2381,7 @@ func (e evalTree) finish(iter unifyIterator) error { // In some cases, it may not be possible to PE the ref. If the path refers // to virtual docs that PE does not support or base documents where inlining // has been disabled, then we have to save. 
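deferredEarlyExitContainer above centralizes a pattern that previously lived in a local closure: remember the first deferred early-exit error, swallow it, keep iterating, and only surface it after the loop. A self-contained sketch with a stand-in error type:

package main

import (
	"errors"
	"fmt"
)

type deferredErr struct{ msg string }

func (d *deferredErr) Error() string { return d.msg }

type container struct{ deferred *deferredErr }

// handleErr captures the first deferred error (returning nil so iteration
// continues) and passes every other error through unchanged.
func (c *container) handleErr(err error) error {
	if err == nil {
		return nil
	}
	if c.deferred == nil && errors.As(err, &c.deferred) {
		return nil
	}
	return err
}

func main() {
	c := &container{}
	fmt.Println(c.handleErr(&deferredErr{"early exit"})) // <nil>: captured
	fmt.Println(c.handleErr(errors.New("real failure"))) // passed through
	fmt.Println(c.deferred)                              // early exit
}

Pooling these containers (deecPool above) only works because copyError hands back a copy, so the pooled instance can be reset without invalidating the error it returned.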
- save := e.e.unknown(e.plugged, e.e.bindings) - - if save { + if e.e.partial() && e.e.unknown(e.plugged, e.e.bindings) { return e.e.saveUnify(ast.NewTerm(e.plugged), e.rterm, e.bindings, e.rbindings, iter) } @@ -2171,28 +2435,20 @@ func (e evalTree) enumerate(iter unifyIterator) error { return err } - var deferredEe *deferredEarlyExitError - handleErr := func(err error) error { - var dee *deferredEarlyExitError - if errors.As(err, &dee) { - if deferredEe == nil { - deferredEe = dee - } - return nil - } - return err - } + dc := deecPool.Get().(*deferredEarlyExitContainer) + dc.deferred = nil + defer deecPool.Put(dc) if doc != nil { switch doc := doc.(type) { case *ast.Array: - for i := 0; i < doc.Len(); i++ { - k := ast.IntNumberTerm(i) + for i := range doc.Len() { + k := ast.InternedIntNumberTerm(i) err := e.e.biunify(k, e.ref[e.pos], e.bindings, e.bindings, func() error { return e.next(iter, k) }) - if err := handleErr(err); err != nil { + if err := dc.handleErr(err); err != nil { return err } } @@ -2202,7 +2458,7 @@ func (e evalTree) enumerate(iter unifyIterator) error { err := e.e.biunify(k, e.ref[e.pos], e.bindings, e.bindings, func() error { return e.next(iter, k) }) - if err := handleErr(err); err != nil { + if err := dc.handleErr(err); err != nil { return err } } @@ -2211,15 +2467,15 @@ func (e evalTree) enumerate(iter unifyIterator) error { err := e.e.biunify(elem, e.ref[e.pos], e.bindings, e.bindings, func() error { return e.next(iter, elem) }) - return handleErr(err) + return dc.handleErr(err) }); err != nil { return err } } } - if deferredEe != nil { - return deferredEe + if dc.deferred != nil { + return dc.copyError() } if e.node == nil { @@ -2317,12 +2573,12 @@ func (e evalTree) leaves(plugged ast.Ref, node *ast.TreeNode) (ast.Object, error type evalVirtual struct { e *eval - ref ast.Ref - plugged ast.Ref - pos int bindings *bindings rterm *ast.Term rbindings *bindings + ref ast.Ref + plugged ast.Ref + pos int } func (e evalVirtual) eval(iter unifyIterator) error { @@ -2393,14 +2649,14 @@ func (e evalVirtual) eval(iter unifyIterator) error { type evalVirtualPartial struct { e *eval - ref ast.Ref - plugged ast.Ref - pos int ir *ast.IndexResult bindings *bindings rterm *ast.Term rbindings *bindings empty *ast.Term + ref ast.Ref + plugged ast.Ref + pos int } type evalVirtualPartialCacheHint struct { @@ -2442,7 +2698,7 @@ func maxRefLength(rules []*ast.Rule, ceil int) int { for _, r := range rules { rl := len(r.Ref()) if r.Head.RuleKind() == ast.MultiValue { - rl = rl + 1 + rl++ } if rl >= ceil { return ceil @@ -2459,14 +2715,16 @@ func (e evalVirtualPartial) evalEachRule(iter unifyIterator, unknown bool) error return nil } - m := maxRefLength(e.ir.Rules, len(e.ref)) - if e.e.unknown(e.ref[e.pos+1:m], e.bindings) { - for _, rule := range e.ir.Rules { - if err := e.evalOneRulePostUnify(iter, rule); err != nil { - return err + if e.e.partial() { + m := maxRefLength(e.ir.Rules, len(e.ref)) + if e.e.unknown(e.ref[e.pos+1:m], e.bindings) { + for _, rule := range e.ir.Rules { + if err := e.evalOneRulePostUnify(iter, rule); err != nil { + return err + } } + return nil } - return nil } hint, err := e.evalCache(iter) @@ -2536,8 +2794,11 @@ func (e evalVirtualPartial) evalAllRulesNoCache(rules []*ast.Rule) (*ast.Term, e var visitedRefs []ast.Ref + child := evalPool.Get() + defer evalPool.Put(child) + for _, rule := range rules { - child := e.e.child(rule.Body) + e.e.child(rule.Body, child) child.traceEnter(rule) err := child.eval(func(*eval) error { child.traceExit(rule) @@ -2570,8 
+2831,10 @@ func wrapInObjects(leaf *ast.Term, ref ast.Ref) *ast.Term { } func (e evalVirtualPartial) evalOneRulePreUnify(iter unifyIterator, rule *ast.Rule, result *ast.Term, unknown bool, visitedRefs *[]ast.Ref) (*ast.Term, error) { + child := evalPool.Get() + defer evalPool.Put(child) - child := e.e.child(rule.Body) + e.e.child(rule.Body, child) child.traceEnter(rule) var defined bool @@ -2663,7 +2926,10 @@ func (e *eval) biunifyDynamicRef(pos int, a, b ast.Ref, b1, b2 *bindings, iter u } func (e evalVirtualPartial) evalOneRulePostUnify(iter unifyIterator, rule *ast.Rule) error { - child := e.e.child(rule.Body) + child := evalPool.Get() + defer evalPool.Put(child) + + e.e.child(rule.Body, child) child.traceEnter(rule) var defined bool @@ -2747,8 +3013,10 @@ func (e evalVirtualPartial) partialEvalSupport(iter unifyIterator) error { } func (e evalVirtualPartial) partialEvalSupportRule(rule *ast.Rule, _ ast.Ref) (bool, error) { + child := evalPool.Get() + defer evalPool.Put(child) - child := e.e.child(rule.Body) + e.e.child(rule.Body, child) child.traceEnter(rule) e.e.saveStack.PushQuery(nil) @@ -2977,7 +3245,7 @@ func (q vcKeyScope) Hash() int { return hash } -func (q vcKeyScope) IsGround() bool { +func (vcKeyScope) IsGround() bool { return false } @@ -3111,13 +3379,13 @@ func (e evalVirtualPartial) reduce(rule *ast.Rule, b *bindings, result *ast.Term type evalVirtualComplete struct { e *eval - ref ast.Ref - plugged ast.Ref - pos int ir *ast.IndexResult bindings *bindings rterm *ast.Term rbindings *bindings + ref ast.Ref + plugged ast.Ref + pos int } func (e evalVirtualComplete) eval(iter unifyIterator) error { @@ -3226,8 +3494,10 @@ func (e evalVirtualComplete) evalValue(iter unifyIterator, findOne bool) error { } func (e evalVirtualComplete) evalValueRule(iter unifyIterator, rule *ast.Rule, prev *ast.Term, findOne bool) (*ast.Term, error) { + child := evalPool.Get() + defer evalPool.Put(child) - child := e.e.child(rule.Body) + e.e.child(rule.Body, child) child.findOne = findOne child.traceEnter(rule) var result *ast.Term @@ -3262,9 +3532,11 @@ func (e evalVirtualComplete) evalValueRule(iter unifyIterator, rule *ast.Rule, p } func (e evalVirtualComplete) partialEval(iter unifyIterator) error { + child := evalPool.Get() + defer evalPool.Put(child) for _, rule := range e.ir.Rules { - child := e.e.child(rule.Body) + e.e.child(rule.Body, child) child.traceEnter(rule) err := child.eval(func(child *eval) error { @@ -3327,8 +3599,10 @@ func (e evalVirtualComplete) partialEvalSupport(iter unifyIterator) error { } func (e evalVirtualComplete) partialEvalSupportRule(rule *ast.Rule, path ast.Ref) (bool, error) { + child := evalPool.Get() + defer evalPool.Put(child) - child := e.e.child(rule.Body) + e.e.child(rule.Body, child) child.traceEnter(rule) e.e.saveStack.PushQuery(nil) @@ -3383,13 +3657,13 @@ func (e evalVirtualComplete) evalTerm(iter unifyIterator, term *ast.Term, termbi type evalTerm struct { e *eval - ref ast.Ref - pos int bindings *bindings term *ast.Term termbindings *bindings rterm *ast.Term rbindings *bindings + ref ast.Ref + pos int } func (e evalTerm) eval(iter unifyIterator) error { @@ -3440,33 +3714,56 @@ func (e evalTerm) enumerate(iter unifyIterator) error { switch v := e.term.Value.(type) { case *ast.Array: - for i := 0; i < v.Len(); i++ { - k := ast.IntNumberTerm(i) - err := e.e.biunify(k, e.ref[e.pos], e.bindings, e.bindings, func() error { - return e.next(iter, k) - }) + // Note(anders): + // For this case (e.g. 
input.foo[_]), we can avoid the (quite expensive) overhead of a callback + // function literal escaping to the heap in each iteration by inlining the biunification logic, + // meaning a 10x reduction in both the number of allocations made as well as the memory consumed. + // It is possible that such inlining could be done for the set/object cases as well, and that's + // worth looking into later, as I imagine set iteration in particular would be an even greater + // win across most policies. Those cases are however much more complex, as we need to deal with + // any type on either side, not just int/var as is the case here. + for i := range v.Len() { + a := ast.InternedIntNumberTerm(i) + b := e.ref[e.pos] + + if _, ok := b.Value.(ast.Var); ok { + if e.e.traceEnabled { + e.e.traceUnify(a, b) + } + var undo undo + b, e.bindings = e.bindings.apply(b) + e.bindings.bind(b, a, e.bindings, &undo) - if err := handleErr(err); err != nil { - return err + err := e.next(iter, a) + undo.Undo() + if err != nil { + if err := handleErr(err); err != nil { + return err + } + } } } case ast.Object: - if err := v.Iter(func(k, _ *ast.Term) error { + for _, k := range v.Keys() { err := e.e.biunify(k, e.ref[e.pos], e.termbindings, e.bindings, func() error { return e.next(iter, e.termbindings.Plug(k)) }) - return handleErr(err) - }); err != nil { - return err + if err != nil { + if err := handleErr(err); err != nil { + return err + } + } } case ast.Set: - if err := v.Iter(func(elem *ast.Term) error { + for _, elem := range v.Slice() { err := e.e.biunify(elem, e.ref[e.pos], e.termbindings, e.bindings, func() error { return e.next(iter, e.termbindings.Plug(elem)) }) - return handleErr(err) - }); err != nil { - return err + if err != nil { + if err := handleErr(err); err != nil { + return err + } + } } } @@ -3569,7 +3866,11 @@ func (e evalEvery) eval(iter unifyIterator) error { ).SetLocation(e.Domain.Location), ) - domain := e.e.closure(generator) + domain := evalPool.Get() + defer evalPool.Put(domain) + + e.e.closure(generator, domain) + all := true // all generator evaluations yield one successful body evaluation domain.traceEnter(e.expr) @@ -3580,7 +3881,11 @@ func (e evalEvery) eval(iter unifyIterator) error { // This would do extra work, like iterating needlessly if domain was a large array. 
return nil } - body := child.closure(e.Body) + + body := evalPool.Get() + defer evalPool.Put(body) + + child.closure(e.Body, body) body.findOne = true body.traceEnter(e.Body) done := false @@ -3707,10 +4012,12 @@ func applyCopyPropagation(p *copypropagation.CopyPropagator, instr *Instrumentat return result } +func nonGroundKey(k, _ *ast.Term) bool { + return !k.IsGround() +} + func nonGroundKeys(a ast.Object) bool { - return a.Until(func(k, _ *ast.Term) bool { - return !k.IsGround() - }) + return a.Until(nonGroundKey) } func plugKeys(a ast.Object, b *bindings) ast.Object { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/glob.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/glob.go similarity index 85% rename from vendor/github.com/open-policy-agent/opa/topdown/glob.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/glob.go index baf092ab6dd..efaf1d12484 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/glob.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/glob.go @@ -6,15 +6,17 @@ import ( "github.com/gobwas/glob" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) const globCacheMaxSize = 100 const globInterQueryValueCacheHits = "rego_builtin_glob_interquery_value_cache_hits" -var globCacheLock = sync.Mutex{} -var globCache map[string]glob.Glob +var noDelimiters = []rune{} +var dotDelimiters = []rune{'.'} +var globCacheLock = sync.RWMutex{} +var globCache = map[string]glob.Glob{} func builtinGlobMatch(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { pattern, err := builtins.StringOperand(operands[0].Value, 1) @@ -25,14 +27,14 @@ func builtinGlobMatch(bctx BuiltinContext, operands []*ast.Term, iter func(*ast. var delimiters []rune switch operands[1].Value.(type) { case ast.Null: - delimiters = []rune{} + delimiters = noDelimiters case *ast.Array: delimiters, err = builtins.RuneSliceOperand(operands[1].Value, 2) if err != nil { return err } if len(delimiters) == 0 { - delimiters = []rune{'.'} + delimiters = dotDelimiters } default: return builtins.NewOperandTypeErr(2, operands[1].Value, "array", "null") @@ -55,12 +57,13 @@ func builtinGlobMatch(bctx BuiltinContext, operands []*ast.Term, iter func(*ast. if err != nil { return err } - return iter(ast.BooleanTerm(m)) + return iter(ast.InternedBooleanTerm(m)) } func globCompileAndMatch(bctx BuiltinContext, id, pattern, match string, delimiters []rune) (bool, error) { if bctx.InterQueryBuiltinValueCache != nil { + // TODO: Use named cache val, ok := bctx.InterQueryBuiltinValueCache.Get(ast.String(id)) if ok { pat, valid := val.(glob.Glob) @@ -86,14 +89,15 @@ func globCompileAndMatch(bctx BuiltinContext, id, pattern, match string, delimit return res.Match(match), nil } - globCacheLock.Lock() - defer globCacheLock.Unlock() + globCacheLock.RLock() p, ok := globCache[id] + globCacheLock.RUnlock() if !ok { var err error if p, err = glob.Compile(pattern, delimiters...); err != nil { return false, err } + globCacheLock.Lock() if len(globCache) >= globCacheMaxSize { // Delete a (semi-)random key to make room for the new one. 
for k := range globCache { @@ -102,9 +106,10 @@ func globCompileAndMatch(bctx BuiltinContext, id, pattern, match string, delimit } } globCache[id] = p + globCacheLock.Unlock() } - out := p.Match(match) - return out, nil + + return p.Match(match), nil } func builtinGlobQuoteMeta(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -117,7 +122,6 @@ func builtinGlobQuoteMeta(_ BuiltinContext, operands []*ast.Term, iter func(*ast } func init() { - globCache = map[string]glob.Glob{} RegisterBuiltinFunc(ast.GlobMatch.Name, builtinGlobMatch) RegisterBuiltinFunc(ast.GlobQuoteMeta.Name, builtinGlobQuoteMeta) } diff --git a/vendor/github.com/open-policy-agent/opa/topdown/graphql.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/graphql.go similarity index 54% rename from vendor/github.com/open-policy-agent/opa/topdown/graphql.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/graphql.go index 8fb1b58a76d..c887041cd7c 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/graphql.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/graphql.go @@ -16,8 +16,9 @@ import ( // Side-effecting import. Triggers GraphQL library's validation rule init() functions. _ "github.com/open-policy-agent/opa/internal/gqlparser/validator/rules" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/cache" ) // Parses a GraphQL schema, and returns the GraphQL AST for the schema. @@ -160,7 +161,7 @@ func pruneIrrelevantGraphQLASTNodes(value ast.Value) ast.Value { // Iterate over the array's elements, and do the following: // - Drop any Nulls // - Drop any any empty object/array value (after running the pruner) - for i := 0; i < x.Len(); i++ { + for i := range x.Len() { vTerm := x.Elem(i) switch v := vTerm.Value.(type) { case ast.Null: @@ -174,7 +175,7 @@ func pruneIrrelevantGraphQLASTNodes(value ast.Value) ast.Value { case ast.Object: // Safe, because we knew the type before going to prune it. vo := pruneIrrelevantGraphQLASTNodes(v).(ast.Object) - if len(vo.Keys()) > 0 { + if vo.Len() > 0 { result = result.Append(ast.NewTerm(vo)) } default: @@ -209,7 +210,7 @@ func pruneIrrelevantGraphQLASTNodes(value ast.Value) ast.Value { case ast.Object: // Safe, because we knew the type before going to prune it. vo := pruneIrrelevantGraphQLASTNodes(v).(ast.Object) - if len(vo.Keys()) > 0 { + if vo.Len() > 0 { result.Insert(k, ast.NewTerm(vo)) } default: @@ -223,9 +224,11 @@ func pruneIrrelevantGraphQLASTNodes(value ast.Value) ast.Value { } // Reports errors from parsing/validation. -func builtinGraphQLParse(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { +func builtinGraphQLParse(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { var queryDoc *gqlast.QueryDocument var schemaDoc *gqlast.SchemaDocument + var schemaASTValue ast.Value + var querySchema ast.Value var err error // Parse/translate query if it's a string/object. @@ -242,42 +245,56 @@ func builtinGraphQLParse(_ BuiltinContext, operands []*ast.Term, iter func(*ast. return err } - // Parse/translate schema if it's a string/object. - switch x := operands[1].Value.(type) { - case ast.String: - schemaDoc, err = parseSchema(string(x)) - case ast.Object: - schemaDoc, err = objectToSchemaDocument(x) - default: - // Error if wrong type. 
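The glob.go hunks above replace the cache's sync.Mutex with a sync.RWMutex so that the common case, a cache hit, only takes a read lock. A compact sketch of the pattern; the expensive compile step is a stand-in and eviction is omitted:

package main

import (
	"fmt"
	"strings"
	"sync"
)

var (
	cacheLock sync.RWMutex
	cache     = map[string]string{}
)

func compileAndMatch(key string) string {
	cacheLock.RLock()
	v, ok := cache[key]
	cacheLock.RUnlock()

	if !ok {
		v = strings.ToUpper(key) // stand-in for glob.Compile
		cacheLock.Lock()
		cache[key] = v
		cacheLock.Unlock()
	}
	return v
}

func main() {
	fmt.Println(compileAndMatch("a.*"), compileAndMatch("a.*"))
}

As in the original, two goroutines can race to compile the same pattern; both produce the same value, so the map stays consistent and the duplicated work is bounded.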
- return builtins.NewOperandTypeErr(1, x, "string", "object") - } - if err != nil { - return err - } + schemaCacheKey, schema := cacheGetSchema(bctx, operands[1]) + schemaASTCacheKey, querySchema := cacheGetSchemaAST(bctx, operands[1]) + if schema == nil || querySchema == nil { + // Parse/translate schema if it's a string/object. + switch x := operands[1].Value.(type) { + case ast.String: + schemaDoc, err = parseSchema(string(x)) + case ast.Object: + schemaDoc, err = objectToSchemaDocument(x) + default: + // Error if wrong type. + return builtins.NewOperandTypeErr(1, x, "string", "object") + } + if err != nil { + return err + } + + // Convert SchemaDoc to Object before validating and converting it to a Schema + // This precludes inclusion of extra definitions from the default GraphQL schema + if querySchema == nil { + schemaASTValue, err = ast.InterfaceToValue(schemaDoc) + if err != nil { + return err + } + querySchema = pruneIrrelevantGraphQLASTNodes(schemaASTValue.(ast.Object)) + cacheInsertSchemaAST(bctx, schemaASTCacheKey, querySchema) + } + + // Validate the query against the schema, erroring if there's an issue. + if schema == nil { + schema, err = convertSchema(schemaDoc) + if err != nil { + return err + } + cacheInsertSchema(bctx, schemaCacheKey, schema) + } + } // Transform the ASTs into Objects. queryASTValue, err := ast.InterfaceToValue(queryDoc) if err != nil { return err } - schemaASTValue, err := ast.InterfaceToValue(schemaDoc) - if err != nil { - return err - } - // Validate the query against the schema, erroring if there's an issue. - schema, err := convertSchema(schemaDoc) - if err != nil { - return err - } if err := validateQuery(schema, queryDoc); err != nil { return err } // Recursively remove irrelevant AST structures. queryResult := pruneIrrelevantGraphQLASTNodes(queryASTValue.(ast.Object)) - querySchema := pruneIrrelevantGraphQLASTNodes(schemaASTValue.(ast.Object)) // Construct return value. verified := ast.ArrayTerm( @@ -289,13 +306,15 @@ func builtinGraphQLParse(_ BuiltinContext, operands []*ast.Term, iter func(*ast. } // Returns default value when errors occur. -func builtinGraphQLParseAndVerify(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { +func builtinGraphQLParseAndVerify(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { var queryDoc *gqlast.QueryDocument var schemaDoc *gqlast.SchemaDocument + var schemaASTValue ast.Value + var querySchema ast.Value var err error unverified := ast.ArrayTerm( - ast.BooleanTerm(false), + ast.InternedBooleanTerm(false), ast.NewTerm(ast.NewObject()), ast.NewTerm(ast.NewObject()), ) @@ -314,46 +333,61 @@ func builtinGraphQLParseAndVerify(_ BuiltinContext, operands []*ast.Term, iter f return iter(unverified) } - // Parse/translate schema if it's a string/object. - switch x := operands[1].Value.(type) { - case ast.String: - schemaDoc, err = parseSchema(string(x)) - case ast.Object: - schemaDoc, err = objectToSchemaDocument(x) - default: - // Error if wrong type. - return iter(unverified) - } - if err != nil { - return iter(unverified) - } - // Transform the ASTs into Objects. 
queryASTValue, err := ast.InterfaceToValue(queryDoc) if err != nil { return iter(unverified) } - schemaASTValue, err := ast.InterfaceToValue(schemaDoc) - if err != nil { - return iter(unverified) + + schemaCacheKey, schema := cacheGetSchema(bctx, operands[1]) + schemaASTCacheKey, querySchema := cacheGetSchemaAST(bctx, operands[1]) + if schema == nil || querySchema == nil { + // Parse/translate schema if it's a string/object. + switch x := operands[1].Value.(type) { + case ast.String: + schemaDoc, err = parseSchema(string(x)) + case ast.Object: + schemaDoc, err = objectToSchemaDocument(x) + default: + // Error if wrong type. + return iter(unverified) + } + if err != nil { + return iter(unverified) + } + + // Convert SchemaDoc to Object before validating and converting it to a Schema + // This precludes inclusion of extra definitions from the default GraphQL schema + if querySchema == nil { + schemaASTValue, err = ast.InterfaceToValue(schemaDoc) + if err != nil { + return iter(unverified) + } + querySchema = pruneIrrelevantGraphQLASTNodes(schemaASTValue.(ast.Object)) + cacheInsertSchemaAST(bctx, schemaASTCacheKey, querySchema) + } + + if schema == nil { + schema, err = convertSchema(schemaDoc) + if err != nil { + return iter(unverified) + } + cacheInsertSchema(bctx, schemaCacheKey, schema) + } + } // Validate the query against the schema, erroring if there's an issue. - schema, err := convertSchema(schemaDoc) - if err != nil { - return iter(unverified) - } if err := validateQuery(schema, queryDoc); err != nil { return iter(unverified) } // Recursively remove irrelevant AST structures. queryResult := pruneIrrelevantGraphQLASTNodes(queryASTValue.(ast.Object)) - querySchema := pruneIrrelevantGraphQLASTNodes(schemaASTValue.(ast.Object)) // Construct return value. verified := ast.ArrayTerm( - ast.BooleanTerm(true), + ast.InternedBooleanTerm(true), ast.NewTerm(queryResult), ast.NewTerm(querySchema), ) @@ -385,33 +419,43 @@ func builtinGraphQLParseQuery(_ BuiltinContext, operands []*ast.Term, iter func( return iter(ast.NewTerm(result)) } -func builtinGraphQLParseSchema(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - raw, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } +func builtinGraphQLParseSchema(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + schemaDocCacheKey, schemaDoc := cacheGetSchemaDoc(bctx, operands[0]) + if schemaDoc == nil { + raw, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } - // Get the highly-nested AST struct, along with any errors generated. - schema, err := parseSchema(string(raw)) - if err != nil { - return err + // Get the highly-nested AST struct, along with any errors generated. + schemaDoc, err = parseSchema(string(raw)) + if err != nil { + return err + } + // Note SchemaDoc is not validated + cacheInsertSchemaDoc(bctx, schemaDocCacheKey, schemaDoc) } - // Transform the AST into an Object. - value, err := ast.InterfaceToValue(schema) - if err != nil { - return err - } + schemaASTCacheKey, schemaAST := cacheGetSchemaAST(bctx, operands[0]) + if schemaAST == nil { - // Recursively remove irrelevant AST structures. - result := pruneIrrelevantGraphQLASTNodes(value.(ast.Object)) + // Transform the AST into an Object. + value, err := ast.InterfaceToValue(schemaDoc) + if err != nil { + return err + } - return iter(ast.NewTerm(result)) + // Recursively remove irrelevant AST structures. 
+ schemaAST = pruneIrrelevantGraphQLASTNodes(value.(ast.Object)) + cacheInsertSchemaAST(bctx, schemaASTCacheKey, schemaAST) + } + return iter(ast.NewTerm(schemaAST)) } -func builtinGraphQLIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { +func builtinGraphQLIsValid(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { var queryDoc *gqlast.QueryDocument var schemaDoc *gqlast.SchemaDocument + var schema *gqlast.Schema var err error switch x := operands[0].Value.(type) { @@ -421,61 +465,205 @@ func builtinGraphQLIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*as queryDoc, err = objectToQueryDocument(x) default: // Error if wrong type. - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } if err != nil { - return iter(ast.BooleanTerm(false)) - } + return iter(ast.InternedBooleanTerm(false)) + } + + schemaCacheKey, schema := cacheGetSchema(bctx, operands[1]) + if schema == nil { + switch x := operands[1].Value.(type) { + case ast.String: + schemaDoc, err = parseSchema(string(x)) + case ast.Object: + schemaDoc, err = objectToSchemaDocument(x) + default: + // Error if wrong type. + return iter(ast.InternedBooleanTerm(false)) + } + if err != nil { + return iter(ast.InternedBooleanTerm(false)) + } - switch x := operands[1].Value.(type) { - case ast.String: - schemaDoc, err = parseSchema(string(x)) - case ast.Object: - schemaDoc, err = objectToSchemaDocument(x) - default: - // Error if wrong type. - return iter(ast.BooleanTerm(false)) - } - if err != nil { - return iter(ast.BooleanTerm(false)) + // Validate the query against the schema, erroring if there's an issue. + schema, err = convertSchema(schemaDoc) + if err != nil { + return iter(ast.InternedBooleanTerm(false)) + } + cacheInsertSchema(bctx, schemaCacheKey, schema) } - // Validate the query against the schema, erroring if there's an issue. - schema, err := convertSchema(schemaDoc) - if err != nil { - return iter(ast.BooleanTerm(false)) - } if err := validateQuery(schema, queryDoc); err != nil { - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } // If we got this far, the GraphQL query passed validation. - return iter(ast.BooleanTerm(true)) + return iter(ast.InternedBooleanTerm(true)) } -func builtinGraphQLSchemaIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - var schemaDoc *gqlast.SchemaDocument +func builtinGraphQLSchemaIsValid(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { var err error - switch x := operands[0].Value.(type) { - case ast.String: - schemaDoc, err = parseSchema(string(x)) - case ast.Object: - schemaDoc, err = objectToSchemaDocument(x) - default: - // Error if wrong type. - return iter(ast.BooleanTerm(false)) + // Schemas are only cached if they are valid + schemaCacheKey, schema := cacheGetSchema(bctx, operands[0]) + if schema == nil { + var schemaDoc *gqlast.SchemaDocument + var validatedSchema *gqlast.Schema + + switch x := operands[0].Value.(type) { + case ast.String: + schemaDoc, err = parseSchema(string(x)) + case ast.Object: + schemaDoc, err = objectToSchemaDocument(x) + default: + // Error if wrong type. 
+ return iter(ast.InternedBooleanTerm(false)) + } + if err != nil { + return iter(ast.InternedBooleanTerm(false)) + } + // Validate the schema, this determines the result + // and whether there is a schema to cache + validatedSchema, err = convertSchema(schemaDoc) + if err == nil { + cacheInsertSchema(bctx, schemaCacheKey, validatedSchema) + } } - if err != nil { - return iter(ast.BooleanTerm(false)) + + return iter(ast.InternedBooleanTerm(err == nil)) +} + +// Insert Schema into cache +func cacheInsertSchema(bctx BuiltinContext, key string, schema *gqlast.Schema) { + if bctx.InterQueryBuiltinValueCache == nil || key == "" { + return + } + cacheKey := ast.String(key) + c := bctx.InterQueryBuiltinValueCache.GetCache(gqlCacheName) + if c == nil { + return + } + c.Insert(cacheKey, schema) +} + +// Insert SchemaAST into cache +func cacheInsertSchemaAST(bctx BuiltinContext, key string, schemaAST ast.Value) { + if bctx.InterQueryBuiltinValueCache == nil || key == "" { + return + } + cacheKeyAST := ast.String(key) + c := bctx.InterQueryBuiltinValueCache.GetCache(gqlCacheName) + if c == nil { + return + } + c.Insert(cacheKeyAST, schemaAST) +} + +// Insert SchemaDocument into cache +func cacheInsertSchemaDoc(bctx BuiltinContext, key string, schemaDoc *gqlast.SchemaDocument) { + if bctx.InterQueryBuiltinValueCache == nil || key == "" { + return + } + cacheKey := ast.String(key) + c := bctx.InterQueryBuiltinValueCache.GetCache(gqlCacheName) + if c == nil { + return + } + c.Insert(cacheKey, schemaDoc) +} + +// Returns the cache key and a Schema if this key already exists in the cache +func cacheGetSchema(bctx BuiltinContext, t *ast.Term) (string, *gqlast.Schema) { + if bctx.InterQueryBuiltinValueCache != nil { + if c := bctx.InterQueryBuiltinValueCache.GetCache(gqlCacheName); c != nil { + if key, keyOk := cacheKeyWithPrefix(bctx, t, "gql_schema-"); keyOk { + if val, ok := c.Get(ast.String(key)); ok { + if schema, isSchema := val.(*gqlast.Schema); isSchema { + return key, schema + } + } + return key, nil + } + } + } + return "", nil +} + +// Returns the cache key and a SchemaDocument if this key already exists in the cache +// Note: the SchemaDocument is not a validated Schema +func cacheGetSchemaDoc(bctx BuiltinContext, t *ast.Term) (string, *gqlast.SchemaDocument) { + if bctx.InterQueryBuiltinValueCache != nil { + if c := bctx.InterQueryBuiltinValueCache.GetCache(gqlCacheName); c != nil { + if key, keyOk := cacheKeyWithPrefix(bctx, t, "gql_schema_doc-"); keyOk { + if val, ok := c.Get(ast.String(key)); ok { + if schemaDoc, isSchemaDoc := val.(*gqlast.SchemaDocument); isSchemaDoc { + return key, schemaDoc + } + } + return key, nil + } + } + } + return "", nil +} + +// Returns the cache key and a SchemaDocument if this key already exists in the cache +// Note: the AST should be pruned +func cacheGetSchemaAST(bctx BuiltinContext, t *ast.Term) (string, ast.Value) { + if bctx.InterQueryBuiltinValueCache != nil { + if c := bctx.InterQueryBuiltinValueCache.GetCache(gqlCacheName); c != nil { + if key, keyOk := cacheKeyWithPrefix(bctx, t, "gql_schema_ast-"); keyOk { + if val, ok := c.Get(ast.String(key)); ok { + if schemaAST, isSchemaAST := val.(ast.Value); isSchemaAST { + return key, schemaAST + } + } + return key, nil + } + } + } + return "", nil +} + +// Compute a constant size key for use with the cache +func cacheKeyWithPrefix(bctx BuiltinContext, t *ast.Term, prefix string) (string, bool) { + var cacheKey ast.String + var ok = false + + if bctx.InterQueryBuiltinValueCache != nil { + switch t.Value.(type) 
{ + case ast.String: + err := builtinCryptoSha256(bctx, []*ast.Term{t}, func(term *ast.Term) error { + cacheKey = term.Value.(ast.String) + return nil + }) + ok = (len(cacheKey) > 0) && (err == nil) + case ast.Object: + objTerm := ast.StringTerm(t.String()) + err := builtinCryptoSha256(bctx, []*ast.Term{objTerm}, func(term *ast.Term) error { + cacheKey = term.Value.(ast.String) + return nil + }) + ok = (len(cacheKey) > 0) && (err == nil) + default: + ok = false + } } - // Validate the schema, this determines the result - _, err = convertSchema(schemaDoc) - return iter(ast.BooleanTerm(err == nil)) + return prefix + string(cacheKey), ok } +const gqlCacheName = "graphql" + func init() { + + var defaultCacheEntries int = 10 + var graphqlCacheConfig = cache.NamedValueCacheConfig{ + MaxNumEntries: &defaultCacheEntries, + } + cache.RegisterDefaultInterQueryBuiltinValueCacheConfig(gqlCacheName, &graphqlCacheConfig) + RegisterBuiltinFunc(ast.GraphQLParse.Name, builtinGraphQLParse) RegisterBuiltinFunc(ast.GraphQLParseAndVerify.Name, builtinGraphQLParseAndVerify) RegisterBuiltinFunc(ast.GraphQLParseQuery.Name, builtinGraphQLParseQuery) diff --git a/vendor/github.com/open-policy-agent/opa/topdown/http.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/http.go similarity index 90% rename from vendor/github.com/open-policy-agent/opa/topdown/http.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/http.go index 18bfd3c7224..463f01de220 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/http.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/http.go @@ -10,6 +10,7 @@ import ( "crypto/tls" "crypto/x509" "encoding/json" + "errors" "fmt" "io" "math" @@ -22,12 +23,12 @@ import ( "strings" "time" - "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/version" - "github.com/open-policy-agent/opa/topdown/builtins" - "github.com/open-policy-agent/opa/topdown/cache" - "github.com/open-policy-agent/opa/tracing" - "github.com/open-policy-agent/opa/util" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/cache" + "github.com/open-policy-agent/opa/v1/tracing" + "github.com/open-policy-agent/opa/v1/util" ) type cachingMode string @@ -86,16 +87,33 @@ var cacheableHTTPStatusCodes = [...]int{ http.StatusNotImplemented, } +var ( + codeTerm = ast.StringTerm("code") + messageTerm = ast.StringTerm("message") + statusCodeTerm = ast.StringTerm("status_code") + errorTerm = ast.StringTerm("error") + methodTerm = ast.StringTerm("method") + urlTerm = ast.StringTerm("url") + + httpSendNetworkErrTerm = ast.StringTerm(HTTPSendNetworkErr) + httpSendInternalErrTerm = ast.StringTerm(HTTPSendInternalErr) +) + var ( allowedKeys = ast.NewSet() + keyCache = make(map[string]*ast.Term, len(allowedKeyNames)) cacheableCodes = ast.NewSet() - requiredKeys = ast.NewSet(ast.StringTerm("method"), ast.StringTerm("url")) - httpSendLatencyMetricKey = "rego_builtin_" + strings.ReplaceAll(ast.HTTPSend.Name, ".", "_") + requiredKeys = ast.NewSet(methodTerm, urlTerm) + httpSendLatencyMetricKey = "rego_builtin_http_send" httpSendInterQueryCacheHits = httpSendLatencyMetricKey + "_interquery_cache_hits" ) type httpSendKey string +// CustomizeRoundTripper allows customizing an existing http.Transport, +// to the returned value, which could be the same Transport or a new one. 
+type CustomizeRoundTripper func(*http.Transport) http.RoundTripper + const ( // httpSendBuiltinCacheKey is the key in the builtin context cache that // points to the http.send() specific cache resides at. @@ -147,22 +165,24 @@ func builtinHTTPSend(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.T } func generateRaiseErrorResult(err error) *ast.Term { - obj := ast.NewObject() - obj.Insert(ast.StringTerm("status_code"), ast.IntNumberTerm(0)) - - errObj := ast.NewObject() - + var errObj ast.Object switch err.(type) { case *url.Error: - errObj.Insert(ast.StringTerm("code"), ast.StringTerm(HTTPSendNetworkErr)) + errObj = ast.NewObject( + ast.Item(codeTerm, httpSendNetworkErrTerm), + ast.Item(messageTerm, ast.StringTerm(err.Error())), + ) default: - errObj.Insert(ast.StringTerm("code"), ast.StringTerm(HTTPSendInternalErr)) + errObj = ast.NewObject( + ast.Item(codeTerm, httpSendInternalErrTerm), + ast.Item(messageTerm, ast.StringTerm(err.Error())), + ) } - errObj.Insert(ast.StringTerm("message"), ast.StringTerm(err.Error())) - obj.Insert(ast.StringTerm("error"), ast.NewTerm(errObj)) - - return ast.NewTerm(obj) + return ast.NewTerm(ast.NewObject( + ast.Item(statusCodeTerm, ast.InternedIntNumberTerm(0)), + ast.Item(errorTerm, ast.NewTerm(errObj)), + )) } func getHTTPResponse(bctx BuiltinContext, req ast.Object) (*ast.Term, error) { @@ -208,21 +228,21 @@ func getHTTPResponse(bctx BuiltinContext, req ast.Object) (*ast.Term, error) { func getKeyFromRequest(req ast.Object) (ast.Object, error) { // deep copy so changes to key do not reflect in the request object key := req.Copy() - cacheIgnoredHeadersTerm := req.Get(ast.StringTerm("cache_ignored_headers")) + cacheIgnoredHeadersTerm := req.Get(keyCache["cache_ignored_headers"]) allHeadersTerm := req.Get(ast.StringTerm("headers")) // skip because no headers to delete if cacheIgnoredHeadersTerm == nil || allHeadersTerm == nil { // need to explicitly set cache_ignored_headers to null // equivalent requests might have different sets of exclusion lists - key.Insert(ast.StringTerm("cache_ignored_headers"), ast.NullTerm()) + key.Insert(ast.StringTerm("cache_ignored_headers"), ast.InternedNullTerm) return key, nil } var cacheIgnoredHeaders []string - var allHeaders map[string]interface{} err := ast.As(cacheIgnoredHeadersTerm.Value, &cacheIgnoredHeaders) if err != nil { return nil, err } + var allHeaders map[string]interface{} err = ast.As(allHeadersTerm.Value, &allHeaders) if err != nil { return nil, err @@ -234,14 +254,14 @@ func getKeyFromRequest(req ast.Object) (ast.Object, error) { if err != nil { return nil, err } - key.Insert(ast.StringTerm("headers"), ast.NewTerm(val)) + key.Insert(keyCache["headers"], ast.NewTerm(val)) // remove cache_ignored_headers key - key.Insert(ast.StringTerm("cache_ignored_headers"), ast.NullTerm()) + key.Insert(keyCache["cache_ignored_headers"], ast.InternedNullTerm) return key, nil } func init() { - createAllowedKeys() + createKeys() createCacheableHTTPStatusCodes() initDefaults() RegisterBuiltinFunc(ast.HTTPSend.Name, builtinHTTPSend) @@ -385,33 +405,24 @@ func verifyURLHost(bctx BuiltinContext, unverifiedURL string) error { } func createHTTPRequest(bctx BuiltinContext, obj ast.Object) (*http.Request, *http.Client, error) { - var url string - var method string - - // Additional CA certificates loading options. - var tlsCaCert []byte - var tlsCaCertEnvVar string - var tlsCaCertFile string - - // Client TLS certificate and key options. Each input source - // comes in a matched pair. 
- var tlsClientCert []byte - var tlsClientKey []byte - - var tlsClientCertEnvVar string - var tlsClientKeyEnvVar string - - var tlsClientCertFile string - var tlsClientKeyFile string - - var tlsServerName string - var body *bytes.Buffer - var rawBody *bytes.Buffer - var enableRedirect bool - var tlsUseSystemCerts *bool - var tlsConfig tls.Config - var customHeaders map[string]interface{} - var tlsInsecureSkipVerify bool + var ( + url, method string + // Additional CA certificates loading options. + tlsCaCert []byte + tlsCaCertEnvVar, tlsCaCertFile string + // Client TLS certificate and key options. Each input source + // comes in a matched pair. + tlsClientCert, tlsClientKey []byte + tlsClientCertEnvVar, tlsClientKeyEnvVar string + tlsClientCertFile, tlsClientKeyFile, tlsServerName string + + body, rawBody *bytes.Buffer + enableRedirect, tlsInsecureSkipVerify bool + tlsUseSystemCerts *bool + tlsConfig tls.Config + customHeaders map[string]interface{} + ) + timeout := defaultHTTPRequestTimeout for _, val := range obj.Keys() { @@ -509,7 +520,7 @@ func createHTTPRequest(bctx BuiltinContext, obj ast.Object) (*http.Request, *htt var ok bool customHeaders, ok = headersValInterface.(map[string]interface{}) if !ok { - return nil, nil, fmt.Errorf("invalid type for headers key") + return nil, nil, errors.New("invalid type for headers key") } case "tls_insecure_skip_verify": tlsInsecureSkipVerify, err = strconv.ParseBool(obj.Get(val).String()) @@ -596,7 +607,7 @@ func createHTTPRequest(bctx BuiltinContext, obj ast.Object) (*http.Request, *htt } if len(tlsCaCert) != 0 { - tlsCaCert = bytes.Replace(tlsCaCert, []byte("\\n"), []byte("\n"), -1) + tlsCaCert = bytes.ReplaceAll(tlsCaCert, []byte("\\n"), []byte("\n")) pool, err := addCACertsFromBytes(tlsConfig.RootCAs, tlsCaCert) if err != nil { return nil, nil, err @@ -626,23 +637,29 @@ func createHTTPRequest(bctx BuiltinContext, obj ast.Object) (*http.Request, *htt tlsConfig.RootCAs = pool } + var transport *http.Transport if isTLS { if ok, parsedURL, tr := useSocket(url, &tlsConfig); ok { - client.Transport = tr + transport = tr url = parsedURL } else { - tr := http.DefaultTransport.(*http.Transport).Clone() - tr.TLSClientConfig = &tlsConfig - tr.DisableKeepAlives = true - client.Transport = tr + transport = http.DefaultTransport.(*http.Transport).Clone() + transport.TLSClientConfig = &tlsConfig + transport.DisableKeepAlives = true } } else { if ok, parsedURL, tr := useSocket(url, nil); ok { - client.Transport = tr + transport = tr url = parsedURL } } + if bctx.RoundTripper != nil { + client.Transport = bctx.RoundTripper(transport) + } else if transport != nil { + client.Transport = transport + } + // check if redirects are enabled if enableRedirect { client.CheckRedirect = func(req *http.Request, _ []*http.Request) error { @@ -714,7 +731,7 @@ func executeHTTPRequest(req *http.Request, client *http.Client, inputReqObj ast. var err error var retry int - retry, err = getNumberValFromReqObj(inputReqObj, ast.StringTerm("max_retry_attempts")) + retry, err = getNumberValFromReqObj(inputReqObj, keyCache["max_retry_attempts"]) if err != nil { return nil, err } @@ -764,28 +781,17 @@ type httpSendCacheEntry struct { // The httpSendCache is used for intra-query caching of http.send results. 
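Just above, the constructed Transport is now handed to bctx.RoundTripper when one is set, rather than being assigned to the client directly. A sketch of a caller-side hook matching the CustomizeRoundTripper signature introduced earlier in this file; loggingRoundTripper and customize are illustrative names, not part of the patch:

package main

import (
	"fmt"
	"net/http"
)

// loggingRoundTripper wraps the transport http.send constructed and
// observes every outgoing request before delegating to it.
type loggingRoundTripper struct{ base http.RoundTripper }

func (l loggingRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
	fmt.Println("http.send ->", r.Method, r.URL) // a real hook might use a logger
	return l.base.RoundTrip(r)
}

// customize matches func(*http.Transport) http.RoundTripper: it may return
// the same Transport, a tweaked clone, or a wrapper like this one.
func customize(t *http.Transport) http.RoundTripper {
	return loggingRoundTripper{base: t}
}

func main() {
	_ = customize(http.DefaultTransport.(*http.Transport).Clone())
}

Combined with Query.WithHTTPRoundTripper further down in this patch, this gives embedders a seam for proxies, metrics, or test doubles without forking topdown.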
type httpSendCache struct { - entries *util.HashMap + entries *util.HasherMap[ast.Value, httpSendCacheEntry] } func newHTTPSendCache() *httpSendCache { return &httpSendCache{ - entries: util.NewHashMap(valueEq, valueHash), + entries: util.NewHasherMap[ast.Value, httpSendCacheEntry](ast.ValueEqual), } } -func valueHash(v util.T) int { - return ast.StringTerm(v.(ast.Value).String()).Hash() -} - -func valueEq(a, b util.T) bool { - av := a.(ast.Value) - bv := b.(ast.Value) - return av.String() == bv.String() -} - func (cache *httpSendCache) get(k ast.Value) *httpSendCacheEntry { if v, ok := cache.entries.Get(k); ok { - v := v.(httpSendCacheEntry) return &v } return nil @@ -966,7 +972,7 @@ func (c *interQueryCache) checkHTTPSendInterQueryCache() (ast.Value, error) { // insertIntoHTTPSendInterQueryCache inserts given key and value in the inter-query cache func insertIntoHTTPSendInterQueryCache(bctx BuiltinContext, key ast.Value, resp *http.Response, respBody []byte, cacheParams *forceCacheParams) error { - if resp == nil || (!forceCaching(cacheParams) && !canStore(resp.Header)) || !cacheableCodes.Contains(ast.IntNumberTerm(resp.StatusCode)) { + if resp == nil || (!forceCaching(cacheParams) && !canStore(resp.Header)) || !cacheableCodes.Contains(ast.InternedIntNumberTerm(resp.StatusCode)) { return nil } @@ -974,7 +980,7 @@ func insertIntoHTTPSendInterQueryCache(bctx BuiltinContext, key ast.Value, resp obj, ok := key.(ast.Object) if !ok { - return fmt.Errorf("interface conversion error") + return errors.New("interface conversion error") } cachingMode, err := getCachingMode(obj) @@ -999,15 +1005,18 @@ func insertIntoHTTPSendInterQueryCache(bctx BuiltinContext, key ast.Value, resp return nil } -func createAllowedKeys() { +func createKeys() { for _, element := range allowedKeyNames { - allowedKeys.Add(ast.StringTerm(element)) + term := ast.StringTerm(element) + + allowedKeys.Add(term) + keyCache[element] = term } } func createCacheableHTTPStatusCodes() { for _, element := range cacheableHTTPStatusCodes { - cacheableCodes.Add(ast.IntNumberTerm(element)) + cacheableCodes.Add(ast.InternedIntNumberTerm(element)) } } @@ -1035,7 +1044,7 @@ func parseTimeout(timeoutVal ast.Value) (time.Duration, error) { } return timeout, nil default: - return timeout, builtins.NewOperandErr(1, "'timeout' must be one of {string, number} but got %s", ast.TypeName(t)) + return timeout, builtins.NewOperandErr(1, "'timeout' must be one of {string, number} but got %s", ast.ValueName(t)) } } @@ -1068,7 +1077,7 @@ func getNumberValFromReqObj(req ast.Object, key *ast.Term) (int, error) { } func getCachingMode(req ast.Object) (cachingMode, error) { - key := ast.StringTerm("caching_mode") + key := keyCache["caching_mode"] var s ast.String var ok bool if v := req.Get(key); v != nil { @@ -1188,7 +1197,7 @@ func (c *interQueryCacheData) toCacheValue() (*interQueryCacheValue, error) { return &interQueryCacheValue{Data: b}, nil } -func (c *interQueryCacheData) SizeInBytes() int64 { +func (*interQueryCacheData) SizeInBytes() int64 { return 0 } @@ -1317,7 +1326,7 @@ func parseCacheControlHeader(headers http.Header) map[string]string { func getResponseHeaderDate(headers http.Header) (date time.Time, err error) { dateHeader := headers.Get("date") if dateHeader == "" { - err = fmt.Errorf("no date header") + err = errors.New("no date header") return } return http.ParseTime(dateHeader) @@ -1467,11 +1476,11 @@ func (c *interQueryCache) CheckCache() (ast.Value, error) { return resp, nil } - c.forceJSONDecode, err = getBoolValFromReqObj(c.key, 
ast.StringTerm("force_json_decode")) + c.forceJSONDecode, err = getBoolValFromReqObj(c.key, keyCache["force_json_decode"]) if err != nil { return nil, handleHTTPSendErr(c.bctx, err) } - c.forceYAMLDecode, err = getBoolValFromReqObj(c.key, ast.StringTerm("force_yaml_decode")) + c.forceYAMLDecode, err = getBoolValFromReqObj(c.key, keyCache["force_yaml_decode"]) if err != nil { return nil, handleHTTPSendErr(c.bctx, err) } @@ -1535,11 +1544,11 @@ func (c *intraQueryCache) CheckCache() (ast.Value, error) { // InsertIntoCache inserts the key set on this object into the cache with the given value func (c *intraQueryCache) InsertIntoCache(value *http.Response) (ast.Value, error) { - forceJSONDecode, err := getBoolValFromReqObj(c.key, ast.StringTerm("force_json_decode")) + forceJSONDecode, err := getBoolValFromReqObj(c.key, keyCache["force_json_decode"]) if err != nil { return nil, handleHTTPSendErr(c.bctx, err) } - forceYAMLDecode, err := getBoolValFromReqObj(c.key, ast.StringTerm("force_yaml_decode")) + forceYAMLDecode, err := getBoolValFromReqObj(c.key, keyCache["force_yaml_decode"]) if err != nil { return nil, handleHTTPSendErr(c.bctx, err) } @@ -1549,7 +1558,7 @@ func (c *intraQueryCache) InsertIntoCache(value *http.Response) (ast.Value, erro return nil, handleHTTPSendErr(c.bctx, err) } - if cacheableCodes.Contains(ast.IntNumberTerm(value.StatusCode)) { + if cacheableCodes.Contains(ast.InternedIntNumberTerm(value.StatusCode)) { insertIntoHTTPSendCache(c.bctx, c.key, result) } @@ -1570,12 +1579,12 @@ func (c *intraQueryCache) ExecuteHTTPRequest() (*http.Response, error) { } func useInterQueryCache(req ast.Object) (bool, *forceCacheParams, error) { - value, err := getBoolValFromReqObj(req, ast.StringTerm("cache")) + value, err := getBoolValFromReqObj(req, keyCache["cache"]) if err != nil { return false, nil, err } - valueForceCache, err := getBoolValFromReqObj(req, ast.StringTerm("force_cache")) + valueForceCache, err := getBoolValFromReqObj(req, keyCache["force_cache"]) if err != nil { return false, nil, err } @@ -1593,9 +1602,9 @@ type forceCacheParams struct { } func newForceCacheParams(req ast.Object) (*forceCacheParams, error) { - term := req.Get(ast.StringTerm("force_cache_duration_seconds")) + term := req.Get(keyCache["force_cache_duration_seconds"]) if term == nil { - return nil, fmt.Errorf("'force_cache' set but 'force_cache_duration_seconds' parameter is missing") + return nil, errors.New("'force_cache' set but 'force_cache_duration_seconds' parameter is missing") } forceCacheDurationSeconds := term.String() @@ -1611,9 +1620,9 @@ func newForceCacheParams(req ast.Object) (*forceCacheParams, error) { func getRaiseErrorValue(req ast.Object) (bool, error) { result := ast.Boolean(true) var ok bool - if v := req.Get(ast.StringTerm("raise_error")); v != nil { + if v := req.Get(keyCache["raise_error"]); v != nil { if result, ok = v.Value.(ast.Boolean); !ok { - return false, fmt.Errorf("invalid value for raise_error field") + return false, errors.New("invalid value for raise_error field") } } return bool(result), nil diff --git a/vendor/github.com/open-policy-agent/opa/topdown/http_fixup.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/topdown/http_fixup.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup.go diff --git a/vendor/github.com/open-policy-agent/opa/topdown/http_fixup_darwin.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup_darwin.go similarity index 
100% rename from vendor/github.com/open-policy-agent/opa/topdown/http_fixup_darwin.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup_darwin.go diff --git a/vendor/github.com/open-policy-agent/opa/topdown/input.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/input.go similarity index 95% rename from vendor/github.com/open-policy-agent/opa/topdown/input.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/input.go index cb70aeb71ed..ec37b364510 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/input.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/input.go @@ -5,12 +5,12 @@ package topdown import ( - "fmt" + "errors" - "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/v1/ast" ) -var errBadPath = fmt.Errorf("bad document path") +var errBadPath = errors.New("bad document path") func mergeTermWithValues(exist *ast.Term, pairs [][2]*ast.Term) (*ast.Term, error) { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/instrumentation.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/instrumentation.go similarity index 97% rename from vendor/github.com/open-policy-agent/opa/topdown/instrumentation.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/instrumentation.go index 6eacc338ef3..93da1d00225 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/instrumentation.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/instrumentation.go @@ -4,7 +4,7 @@ package topdown -import "github.com/open-policy-agent/opa/metrics" +import "github.com/open-policy-agent/opa/v1/metrics" const ( evalOpPlug = "eval_op_plug" diff --git a/vendor/github.com/open-policy-agent/opa/topdown/json.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/json.go similarity index 95% rename from vendor/github.com/open-policy-agent/opa/topdown/json.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/json.go index 8a5d2328360..aa1023d377c 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/json.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/json.go @@ -5,12 +5,13 @@ package topdown import ( + "errors" "fmt" "strconv" "strings" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" "github.com/open-policy-agent/opa/internal/edittree" ) @@ -98,7 +99,7 @@ func jsonRemove(a *ast.Term, b *ast.Term) (*ast.Term, error) { // When indexes are removed we shift left to close empty spots in the array // as per the JSON patch spec. 
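An aside on the jsonRemove comment above: JSON Patch (RFC 6902) removals on arrays must close the gap, so later elements shift left by one. A standalone illustration of that semantics; removeIndex is a hypothetical helper, not the vendored code:

package main

import "fmt"

// removeIndex drops element i and shifts the tail left, as the JSON Patch
// spec requires. The full slice expression xs[:i:i] caps capacity so the
// append allocates instead of clobbering the caller's backing array.
func removeIndex(xs []string, i int) []string {
	return append(xs[:i:i], xs[i+1:]...)
}

func main() {
	fmt.Println(removeIndex([]string{"a", "b", "c"}, 1)) // [a c]
}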
newArray := ast.NewArray() - for i := 0; i < aValue.Len(); i++ { + for i := range aValue.Len() { v := aValue.Elem(i) // recurse and add the diff of sub objects as needed // Note: Keys in b will be strings for the index, eg path /a/1/b => {"a": {"1": {"b": null}}} @@ -144,7 +145,7 @@ func getJSONPaths(operand ast.Value) ([]ast.Ref, error) { switch v := operand.(type) { case *ast.Array: - for i := 0; i < v.Len(); i++ { + for i := range v.Len() { filter, err := parsePath(v.Elem(i)) if err != nil { return nil, err @@ -189,7 +190,7 @@ func parsePath(path *ast.Term) (ast.Ref, error) { pathSegments = append(pathSegments, term) }) default: - return nil, builtins.NewOperandErr(2, "must be one of {set, array} containing string paths or array of path segments but got %v", ast.TypeName(p)) + return nil, builtins.NewOperandErr(2, "must be one of {set, array} containing string paths or array of path segments but got %v", ast.ValueName(p)) } return pathSegments, nil @@ -231,7 +232,7 @@ func pathsToObject(paths []ast.Ref) ast.Object { } if !done { - node.Insert(path[len(path)-1], ast.NullTerm()) + node.Insert(path[len(path)-1], ast.InternedNullTerm) } } @@ -263,7 +264,7 @@ func getPatch(o ast.Object) (jsonPatch, error) { } op, ok := opTerm.Value.(ast.String) if !ok { - return out, fmt.Errorf("attribute 'op' must be a string") + return out, errors.New("attribute 'op' must be a string") } out.op = string(op) if _, found := validOps[out.op]; !found { @@ -302,10 +303,10 @@ func getPatch(o ast.Object) (jsonPatch, error) { func applyPatches(source *ast.Term, operations *ast.Array) (*ast.Term, error) { et := edittree.NewEditTree(source) - for i := 0; i < operations.Len(); i++ { + for i := range operations.Len() { object, ok := operations.Elem(i).Value.(ast.Object) if !ok { - return nil, fmt.Errorf("must be an array of JSON-Patch objects, but at least one element is not an object") + return nil, errors.New("must be an array of JSON-Patch objects, but at least one element is not an object") } patch, err := getPatch(object) if err != nil { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/jsonschema.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/jsonschema.go similarity index 96% rename from vendor/github.com/open-policy-agent/opa/topdown/jsonschema.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/jsonschema.go index d319bc0b0df..b1609fb0445 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/jsonschema.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/jsonschema.go @@ -8,8 +8,8 @@ import ( "encoding/json" "errors" - "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/gojsonschema" + "github.com/open-policy-agent/opa/v1/ast" ) // astValueToJSONSchemaLoader converts a value to JSON Loader. @@ -44,7 +44,7 @@ func astValueToJSONSchemaLoader(value ast.Value) (gojsonschema.JSONLoader, error } func newResultTerm(valid bool, data *ast.Term) *ast.Term { - return ast.ArrayTerm(ast.BooleanTerm(valid), data) + return ast.ArrayTerm(ast.InternedBooleanTerm(valid), data) } // builtinJSONSchemaVerify accepts 1 argument which can be string or object and checks if it is valid JSON schema. 
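Several hunks in this file (and elsewhere in the patch) rewrite three-clause loops like `for i := 0; i < v.Len(); i++` into Go 1.22's range-over-int form. The two are equivalent: `for i := range n` visits i = 0 through n-1. A quick confirmation:

package main

import "fmt"

func main() {
	// Both loops visit exactly i = 0, 1, 2; the range form (Go 1.22+)
	// is the spelling the patch rewrites toward.
	for i := 0; i < 3; i++ {
		fmt.Print(i, " ")
	}
	fmt.Println()
	for i := range 3 {
		fmt.Print(i, " ")
	}
	fmt.Println()
}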
@@ -61,7 +61,7 @@ func builtinJSONSchemaVerify(_ BuiltinContext, operands []*ast.Term, iter func(* return iter(newResultTerm(false, ast.StringTerm("jsonschema: "+err.Error()))) } - return iter(newResultTerm(true, ast.NullTerm())) + return iter(newResultTerm(true, ast.InternedNullTerm)) } // builtinJSONMatchSchema accepts 2 arguments both can be string or object and verifies if the document matches the JSON schema. diff --git a/vendor/github.com/open-policy-agent/opa/topdown/net.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/net.go similarity index 93% rename from vendor/github.com/open-policy-agent/opa/topdown/net.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/net.go index 534520529a7..17ed7798449 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/net.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/net.go @@ -8,8 +8,8 @@ import ( "net" "strings" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) type lookupIPAddrCacheKey string diff --git a/vendor/github.com/open-policy-agent/opa/topdown/numbers.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/numbers.go similarity index 63% rename from vendor/github.com/open-policy-agent/opa/topdown/numbers.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/numbers.go index 27f3156b8ad..398040d7aee 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/numbers.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/numbers.go @@ -5,18 +5,23 @@ package topdown import ( + "errors" "fmt" "math/big" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) type randIntCachingKey string +var zero = big.NewInt(0) var one = big.NewInt(1) func builtinNumbersRange(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + if canGenerateCheapRange(operands) { + return generateCheapRange(operands, iter) + } x, err := builtins.BigIntOperand(operands[0].Value, 1) if err != nil { @@ -53,8 +58,8 @@ func builtinNumbersRangeStep(bctx BuiltinContext, operands []*ast.Term, iter fun return err } - if step.Cmp(big.NewInt(0)) <= 0 { - return fmt.Errorf("numbers.range_step: step must be a positive number above zero") + if step.Cmp(zero) <= 0 { + return errors.New("numbers.range_step: step must be a positive number above zero") } ast, err := generateRange(bctx, x, y, step, "numbers.range_step") @@ -65,6 +70,59 @@ func builtinNumbersRangeStep(bctx BuiltinContext, operands []*ast.Term, iter fun return iter(ast) } +func canGenerateCheapRange(operands []*ast.Term) bool { + x, err := builtins.IntOperand(operands[0].Value, 1) + if err != nil || !ast.HasInternedIntNumberTerm(x) { + return false + } + + y, err := builtins.IntOperand(operands[1].Value, 2) + if err != nil || !ast.HasInternedIntNumberTerm(y) { + return false + } + + return true +} + +func generateCheapRange(operands []*ast.Term, iter func(*ast.Term) error) error { + x, err := builtins.IntOperand(operands[0].Value, 1) + if err != nil { + return err + } + + y, err := builtins.IntOperand(operands[1].Value, 2) + if err != nil { + return err + } + + step := 1 + + if len(operands) > 2 { + stepOp, err := builtins.IntOperand(operands[2].Value, 3) + if err == nil { + step = stepOp + } + } + + if step <= 0 { + return errors.New("numbers.range_step: step 
must be a positive number above zero") + } + + terms := make([]*ast.Term, 0, y+1) + + if x <= y { + for i := x; i <= y; i += step { + terms = append(terms, ast.InternedIntNumberTerm(i)) + } + } else { + for i := x; i >= y; i -= step { + terms = append(terms, ast.InternedIntNumberTerm(i)) + } + } + + return iter(ast.ArrayTerm(terms...)) +} + func generateRange(bctx BuiltinContext, x *big.Int, y *big.Int, step *big.Int, funcName string) (*ast.Term, error) { cmp := x.Cmp(y) @@ -81,7 +139,7 @@ func generateRange(bctx BuiltinContext, x *big.Int, y *big.Int, step *big.Int, f haltErr := Halt{ Err: &Error{ Code: CancelErr, - Message: fmt.Sprintf("%s: timed out before generating all numbers in range", funcName), + Message: funcName + ": timed out before generating all numbers in range", }, } @@ -109,7 +167,7 @@ func builtinRandIntn(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.T } if n == 0 { - return iter(ast.IntNumberTerm(0)) + return iter(ast.InternedIntNumberTerm(0)) } if n < 0 { @@ -126,7 +184,7 @@ func builtinRandIntn(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.T if err != nil { return err } - result := ast.IntNumberTerm(r.Intn(n)) + result := ast.InternedIntNumberTerm(r.Intn(n)) bctx.Cache.Put(key, result) return iter(result) diff --git a/vendor/github.com/open-policy-agent/opa/topdown/object.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/object.go similarity index 91% rename from vendor/github.com/open-policy-agent/opa/topdown/object.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/object.go index ba5d77ff371..56313b5b56f 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/object.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/object.go @@ -5,9 +5,9 @@ package topdown import ( - "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/ref" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) func builtinObjectUnion(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -21,6 +21,16 @@ func builtinObjectUnion(_ BuiltinContext, operands []*ast.Term, iter func(*ast.T return err } + if objA.Len() == 0 { + return iter(operands[1]) + } + if objB.Len() == 0 { + return iter(operands[0]) + } + if objA.Compare(objB) == 0 { + return iter(operands[0]) + } + r := mergeWithOverwrite(objA, objB) return iter(ast.NewTerm(r)) @@ -50,9 +60,6 @@ func builtinObjectUnionN(_ BuiltinContext, operands []*ast.Term, iter func(*ast. return builtins.NewOperandElementErr(1, arr, arr.Elem(i).Value, "object") } mergewithOverwriteInPlace(result, o, frozenKeys) - if err != nil { - return err - } } return iter(ast.NewTerm(result)) @@ -95,7 +102,7 @@ func builtinObjectFilter(_ BuiltinContext, operands []*ast.Term, iter func(*ast. filterObj := ast.NewObject() keys.Foreach(func(key *ast.Term) { - filterObj.Insert(key, ast.NullTerm()) + filterObj.Insert(key, ast.InternedNullTerm) }) // Actually do the filtering @@ -144,37 +151,24 @@ func builtinObjectKeys(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Te return err } - keys := ast.SetTerm(object.Keys()...) 
- - return iter(keys) + return iter(ast.SetTerm(object.Keys()...)) } // getObjectKeysParam returns a set of key values // from a supplied ast array, object, set value func getObjectKeysParam(arrayOrSet ast.Value) (ast.Set, error) { - keys := ast.NewSet() - switch v := arrayOrSet.(type) { case *ast.Array: - _ = v.Iter(func(f *ast.Term) error { - keys.Add(f) - return nil - }) + keys := ast.NewSet() + v.Foreach(keys.Add) + return keys, nil case ast.Set: - _ = v.Iter(func(f *ast.Term) error { - keys.Add(f) - return nil - }) + return ast.NewSet(v.Slice()...), nil case ast.Object: - _ = v.Iter(func(k *ast.Term, _ *ast.Term) error { - keys.Add(k) - return nil - }) - default: - return nil, builtins.NewOperandTypeErr(2, arrayOrSet, "object", "set", "array") + return ast.NewSet(v.Keys()...), nil } - return keys, nil + return nil, builtins.NewOperandTypeErr(2, arrayOrSet, "object", "set", "array") } func mergeWithOverwrite(objA, objB ast.Object) ast.Object { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/parse.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/parse.go similarity index 91% rename from vendor/github.com/open-policy-agent/opa/topdown/parse.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/parse.go index c46222b413e..464e0141a26 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/parse.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/parse.go @@ -9,8 +9,8 @@ import ( "encoding/json" "fmt" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) func builtinRegoParseModule(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -25,6 +25,7 @@ func builtinRegoParseModule(_ BuiltinContext, operands []*ast.Term, iter func(*a return err } + // FIXME: Use configured rego-version? module, err := ast.ParseModule(string(filename), string(input)) if err != nil { return err diff --git a/vendor/github.com/open-policy-agent/opa/topdown/parse_bytes.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/parse_bytes.go similarity index 74% rename from vendor/github.com/open-policy-agent/opa/topdown/parse_bytes.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/parse_bytes.go index 0cd4bc193a7..cd36b87b178 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/parse_bytes.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/parse_bytes.go @@ -10,8 +10,8 @@ import ( "strings" "unicode" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) const ( @@ -109,7 +109,7 @@ func builtinNumBytes(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term func formatString(s ast.String) string { str := string(s) lower := strings.ToLower(str) - return strings.Replace(lower, "\"", "", -1) + return strings.ReplaceAll(lower, "\"", "") } // Splits the string into a number string à la "10" or "10.2" and a unit @@ -121,21 +121,35 @@ func extractNumAndUnit(s string) (string, string) { } firstNonNumIdx := -1 - for idx, r := range s { - if !isNum(r) { + for idx := 0; idx < len(s); idx++ { + r := rune(s[idx]) + // Identify the first non-numeric character, marking the boundary between the number and the unit. 
+ if !isNum(r) && r != 'e' && r != 'E' && r != '+' && r != '-' { firstNonNumIdx = idx break } + if r == 'e' || r == 'E' { + // Check if the next character is a valid digit or +/- for scientific notation + if idx == len(s)-1 || (!unicode.IsDigit(rune(s[idx+1])) && rune(s[idx+1]) != '+' && rune(s[idx+1]) != '-') { + firstNonNumIdx = idx + break + } + // Skip the next character if it is '+' or '-' + if idx+1 < len(s) && (s[idx+1] == '+' || s[idx+1] == '-') { + idx++ + } + } } - if firstNonNumIdx == -1 { // only digits and '.' + if firstNonNumIdx == -1 { // only digits, '.', or valid scientific notation return s, "" } if firstNonNumIdx == 0 { // only units (starts with non-digit) return "", s } - return s[0:firstNonNumIdx], s[firstNonNumIdx:] + // Return the number and the rest as the unit + return s[:firstNonNumIdx], s[firstNonNumIdx:] } func init() { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/parse_units.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/parse_units.go similarity index 95% rename from vendor/github.com/open-policy-agent/opa/topdown/parse_units.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/parse_units.go index daf240214c3..44aec862998 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/parse_units.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/parse_units.go @@ -10,8 +10,8 @@ import ( "math/big" "strings" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) // Binary Si unit constants are borrowed from topdown/parse_bytes @@ -50,7 +50,7 @@ func builtinUnits(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) e // We remove escaped quotes from strings here to retain parity with units.parse_bytes. 
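The extractNumAndUnit changes above extend the number/unit boundary scan to scientific notation: an 'e' or 'E' stays with the number only when a digit or sign follows it. A simplified standalone splitter written to the same rule; splitNumUnit is an illustrative reimplementation under that assumption, not the vendored function:

package main

import (
	"fmt"
	"unicode"
)

// splitNumUnit scans until the first byte that cannot belong to the numeric
// part, keeping 'e'/'E' with the number when a digit or sign follows
// (so "1e3K" splits as "1e3" + "K").
func splitNumUnit(s string) (num, unit string) {
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c >= '0' && c <= '9' || c == '.' {
			continue
		}
		if (c == 'e' || c == 'E') && i+1 < len(s) &&
			(unicode.IsDigit(rune(s[i+1])) || s[i+1] == '+' || s[i+1] == '-') {
			i++ // consume the digit or sign following the exponent marker
			continue
		}
		return s[:i], s[i:]
	}
	return s, ""
}

func main() {
	fmt.Println(splitNumUnit("10.5GiB")) // 10.5 GiB
	fmt.Println(splitNumUnit("1e3K"))    // 1e3 K
	fmt.Println(splitNumUnit("256"))     // 256 (no unit)
}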
s := string(raw) - s = strings.Replace(s, "\"", "", -1) + s = strings.ReplaceAll(s, "\"", "") if strings.Contains(s, " ") { return errIncludesSpaces diff --git a/vendor/github.com/open-policy-agent/opa/topdown/print.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/print.go similarity index 88% rename from vendor/github.com/open-policy-agent/opa/topdown/print.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/print.go index 765b344b3a0..f852f3e320a 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/print.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/print.go @@ -9,9 +9,9 @@ import ( "io" "strings" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" - "github.com/open-policy-agent/opa/topdown/print" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/print" ) func NewPrintHook(w io.Writer) print.Hook { @@ -62,7 +62,7 @@ func builtinPrintCrossProductOperands(bctx BuiltinContext, buf []string, operand xs, ok := operands.Elem(i).Value.(ast.Set) if !ok { - return Halt{Err: internalErr(bctx.Location, fmt.Sprintf("illegal argument type: %v", ast.TypeName(operands.Elem(i).Value)))} + return Halt{Err: internalErr(bctx.Location, fmt.Sprintf("illegal argument type: %v", ast.ValueName(operands.Elem(i).Value)))} } if xs.Len() == 0 { diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/print/print.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/print/print.go new file mode 100644 index 00000000000..ce684ae945b --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/print/print.go @@ -0,0 +1,21 @@ +package print + +import ( + "context" + + "github.com/open-policy-agent/opa/v1/ast" +) + +// Context provides the Hook implementation context about the print() call. +type Context struct { + Context context.Context // request context passed when query executed + Location *ast.Location // location of print call +} + +// Hook defines the interface that callers can implement to receive print +// statement outputs. If the hook returns an error, it will be surfaced if +// strict builtin error checking is enabled (otherwise, it will not halt +// execution.) +type Hook interface { + Print(Context, string) error +} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/providers.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/providers.go similarity index 97% rename from vendor/github.com/open-policy-agent/opa/topdown/providers.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/providers.go index 77db917982d..dd84026e4b3 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/providers.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/providers.go @@ -9,9 +9,9 @@ import ( "net/url" "time" - "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/providers/aws" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) var awsRequiredConfigKeyNames = ast.NewSet( @@ -119,9 +119,6 @@ func builtinAWSSigV4SignReq(_ BuiltinContext, operands []*ast.Term, iter func(*a } signingTimestamp = time.Unix(0, ts) - if err != nil { - return err - } // Make sure our required keys exist! // This check is stricter than required, but better to break here than downstream. 
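The new print/print.go file above fixes the contract for receiving print() output: anything with a Print(print.Context, string) error method qualifies as a Hook. A minimal sketch of an implementation, mirroring the two types locally so the snippet stays self-contained; bufferHook and printCtx are illustrative stand-ins:

package main

import (
	"context"
	"fmt"
	"strings"
)

// Local stand-ins mirroring print.Context / print.Hook from the file above.
type printCtx struct {
	ctx      context.Context
	location string
}

type printHook interface {
	Print(printCtx, string) error
}

// bufferHook collects print() output; per the interface docs, any error it
// returned would only surface when strict builtin error checking is enabled.
type bufferHook struct{ sb strings.Builder }

func (h *bufferHook) Print(c printCtx, msg string) error {
	fmt.Fprintf(&h.sb, "%s: %s\n", c.location, msg)
	return nil
}

func main() {
	h := &bufferHook{}
	_ = h.Print(printCtx{ctx: context.Background(), location: "example.rego:7"}, "hello")
	fmt.Print(h.sb.String())
}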
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/query.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/query.go similarity index 90% rename from vendor/github.com/open-policy-agent/opa/topdown/query.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/query.go index 8406cfdd879..bb55b4a6ccb 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/query.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/query.go @@ -7,15 +7,15 @@ import ( "sort" "time" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/resolver" - "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/topdown/builtins" - "github.com/open-policy-agent/opa/topdown/cache" - "github.com/open-policy-agent/opa/topdown/copypropagation" - "github.com/open-policy-agent/opa/topdown/print" - "github.com/open-policy-agent/opa/tracing" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/resolver" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/cache" + "github.com/open-policy-agent/opa/v1/topdown/copypropagation" + "github.com/open-policy-agent/opa/v1/topdown/print" + "github.com/open-policy-agent/opa/v1/tracing" ) // QueryResultSet represents a collection of results returned by a query. @@ -46,6 +46,7 @@ type Query struct { instr *Instrumentation disableInlining []ast.Ref shallowInlining bool + nondeterministicBuiltins bool genvarprefix string runtime *ast.Term builtins map[string]*Builtin @@ -57,9 +58,11 @@ type Query struct { strictBuiltinErrors bool builtinErrorList *[]Error strictObjects bool + roundTripper CustomizeRoundTripper printHook print.Hook tracingOpts tracing.Options virtualCache VirtualCache + baseCache BaseCache } // Builtin represents a built-in function that queries can call. @@ -279,6 +282,12 @@ func (q *Query) WithResolver(ref ast.Ref, r resolver.Resolver) *Query { return q } +// WithHTTPRoundTripper configures a custom HTTP transport for built-in functions that make HTTP requests. +func (q *Query) WithHTTPRoundTripper(t CustomizeRoundTripper) *Query { + q.roundTripper = t + return q +} + func (q *Query) WithPrintHook(h print.Hook) *Query { q.printHook = h return q @@ -306,6 +315,21 @@ func (q *Query) WithVirtualCache(vc VirtualCache) *Query { return q } +// WithBaseCache sets the BaseCache to use during evaluation. This is +// optional, and if not set, the default cache is used. +func (q *Query) WithBaseCache(bc BaseCache) *Query { + q.baseCache = bc + return q +} + +// WithNondeterministicBuiltins causes non-deterministic builtins to be evalued +// during partial evaluation. This is needed to pull in external data, or validate +// a JWT, during PE, so that the result informs what queries are returned. +func (q *Query) WithNondeterministicBuiltins(yes bool) *Query { + q.nondeterministicBuiltins = yes + return q +} + // PartialRun executes partial evaluation on the query with respect to unknown // values. Partial evaluation attempts to evaluate as much of the query as // possible without requiring values for the unknowns set on the query. 
The @@ -320,7 +344,7 @@ func (q *Query) PartialRun(ctx context.Context) (partials []ast.Body, support [] if q.seed == nil { q.seed = rand.Reader } - if !q.time.IsZero() { + if q.time.IsZero() { q.time = time.Now() } if q.metrics == nil { @@ -337,6 +361,13 @@ func (q *Query) PartialRun(ctx context.Context) (partials []ast.Body, support [] vc = NewVirtualCache() } + var bc BaseCache + if q.baseCache != nil { + bc = q.baseCache + } else { + bc = newBaseCache() + } + e := &eval{ ctx: ctx, metrics: q.metrics, @@ -350,7 +381,7 @@ func (q *Query) PartialRun(ctx context.Context) (partials []ast.Body, support [] bindings: b, compiler: q.compiler, store: q.store, - baseCache: newBaseCache(), + baseCache: bc, targetStack: newRefStack(), txn: q.txn, input: q.input, @@ -373,7 +404,8 @@ func (q *Query) PartialRun(ctx context.Context) (partials []ast.Body, support [] saveNamespace: ast.StringTerm(q.partialNamespace), skipSaveNamespace: q.skipSaveNamespace, inliningControl: &inliningControl{ - shallow: q.shallowInlining, + shallow: q.shallowInlining, + nondeterministicBuiltins: q.nondeterministicBuiltins, }, genvarprefix: q.genvarprefix, runtime: q.runtime, @@ -474,7 +506,11 @@ func (q *Query) PartialRun(ctx context.Context) (partials []ast.Body, support [] } } - for i := range support { + for i, m := range support { + if regoVersion := q.compiler.DefaultRegoVersion(); regoVersion != ast.RegoUndefined { + ast.SetModuleRegoVersion(m, q.compiler.DefaultRegoVersion()) + } + sort.Slice(support[i].Rules, func(j, k int) bool { return support[i].Rules[j].Compare(support[i].Rules[k]) < 0 }) @@ -523,6 +559,13 @@ func (q *Query) Iter(ctx context.Context, iter func(QueryResult) error) error { vc = NewVirtualCache() } + var bc BaseCache + if q.baseCache != nil { + bc = q.baseCache + } else { + bc = newBaseCache() + } + e := &eval{ ctx: ctx, metrics: q.metrics, @@ -536,7 +579,7 @@ func (q *Query) Iter(ctx context.Context, iter func(QueryResult) error) error { bindings: newBindings(0, q.instr), compiler: q.compiler, store: q.store, - baseCache: newBaseCache(), + baseCache: bc, targetStack: newRefStack(), txn: q.txn, input: q.input, @@ -561,6 +604,7 @@ func (q *Query) Iter(ctx context.Context, iter func(QueryResult) error) error { printHook: q.printHook, tracingOpts: q.tracingOpts, strictObjects: q.strictObjects, + roundTripper: q.roundTripper, } e.caller = e q.metrics.Timer(metrics.RegoQueryEval).Start() diff --git a/vendor/github.com/open-policy-agent/opa/topdown/reachable.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/reachable.go similarity index 97% rename from vendor/github.com/open-policy-agent/opa/topdown/reachable.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/reachable.go index 8d61018e764..1c31019db9e 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/reachable.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/reachable.go @@ -5,8 +5,8 @@ package topdown import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) // Helper: sets of vertices can be represented as Arrays or Sets. 
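One behavioral fix worth flagging in the query.go hunks above: PartialRun's guard flips from `!q.time.IsZero()` to `q.time.IsZero()`, so time.Now() is now used only when the caller supplied no timestamp, rather than overwriting one that was set. The corrected pattern in isolation:

package main

import (
	"fmt"
	"time"
)

// defaultedTime mirrors the corrected guard: default to time.Now() only
// when the zero value was passed, and preserve any explicit timestamp.
func defaultedTime(t time.Time) time.Time {
	if t.IsZero() {
		return time.Now()
	}
	return t
}

func main() {
	fixed := time.Date(2020, 1, 2, 3, 4, 5, 0, time.UTC)
	fmt.Println(defaultedTime(fixed).Equal(fixed))   // true: caller value kept
	fmt.Println(defaultedTime(time.Time{}).IsZero()) // false: defaulted to now
}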
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/regex.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/regex.go similarity index 93% rename from vendor/github.com/open-policy-agent/opa/topdown/regex.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/regex.go index 452e7d58bfb..2c434dda879 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/regex.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/regex.go @@ -11,8 +11,8 @@ import ( gintersect "github.com/yashtewari/glob-intersection" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) const regexCacheMaxSize = 100 @@ -25,15 +25,15 @@ func builtinRegexIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast. s, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } _, err = regexp.Compile(string(s)) if err != nil { - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } - return iter(ast.BooleanTerm(true)) + return iter(ast.InternedBooleanTerm(true)) } func builtinRegexMatch(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -49,7 +49,7 @@ func builtinRegexMatch(bctx BuiltinContext, operands []*ast.Term, iter func(*ast if err != nil { return err } - return iter(ast.BooleanTerm(re.MatchString(string(s2)))) + return iter(ast.InternedBooleanTerm(re.MatchString(string(s2)))) } func builtinRegexMatchTemplate(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -79,7 +79,7 @@ func builtinRegexMatchTemplate(_ BuiltinContext, operands []*ast.Term, iter func if err != nil { return err } - return iter(ast.BooleanTerm(re.MatchString(string(match)))) + return iter(ast.InternedBooleanTerm(re.MatchString(string(match)))) } func builtinRegexSplit(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -106,6 +106,7 @@ func builtinRegexSplit(bctx BuiltinContext, operands []*ast.Term, iter func(*ast func getRegexp(bctx BuiltinContext, pat string) (*regexp.Regexp, error) { if bctx.InterQueryBuiltinValueCache != nil { + // TODO: Use named cache val, ok := bctx.InterQueryBuiltinValueCache.Get(ast.String(pat)) if ok { res, valid := val.(*regexp.Regexp) @@ -176,7 +177,7 @@ func builtinGlobsMatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Te if err != nil { return err } - return iter(ast.BooleanTerm(ne)) + return iter(ast.InternedBooleanTerm(ne)) } func builtinRegexFind(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -259,6 +260,9 @@ func builtinRegexReplace(bctx BuiltinContext, operands []*ast.Term, iter func(*a } res := re.ReplaceAllString(string(base), string(value)) + if res == string(base) { + return iter(operands[0]) + } return iter(ast.StringTerm(res)) } diff --git a/vendor/github.com/open-policy-agent/opa/topdown/regex_template.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/regex_template.go similarity index 99% rename from vendor/github.com/open-policy-agent/opa/topdown/regex_template.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/regex_template.go index 4bcddc060b9..a1d946fd59e 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/regex_template.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/regex_template.go @@ -45,7 +45,7 @@ import ( func 
delimiterIndices(s string, delimiterStart, delimiterEnd byte) ([]int, error) { var level, idx int idxs := make([]int, 0) - for i := 0; i < len(s); i++ { + for i := range len(s) { switch s[i] { case delimiterStart: if level++; level == 1 { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/resolver.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/resolver.go similarity index 86% rename from vendor/github.com/open-policy-agent/opa/topdown/resolver.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/resolver.go index 5ed6c1e4432..3620168874b 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/resolver.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/resolver.go @@ -5,9 +5,9 @@ package topdown import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/resolver" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/resolver" ) type resolverTrie struct { @@ -48,7 +48,11 @@ func (t *resolverTrie) Resolve(e *eval, ref ast.Ref) (ast.Value, error) { Input: e.input, Metrics: e.metrics, } - e.traceWasm(e.query[e.index], &in.Ref) + if e.traceEnabled { + // avoid leaking pointer if trace is disabled + cpy := in.Ref + e.traceWasm(e.query[e.index], &cpy) + } if e.data != nil { return nil, errInScopeWithStmt } @@ -75,7 +79,10 @@ func (t *resolverTrie) Resolve(e *eval, ref ast.Ref) (ast.Value, error) { func (t *resolverTrie) mktree(e *eval, in resolver.Input) (ast.Value, error) { if t.r != nil { - e.traceWasm(e.query[e.index], &in.Ref) + if e.traceEnabled { + cpy := in.Ref + e.traceWasm(e.query[e.index], &cpy) + } if e.data != nil { return nil, errInScopeWithStmt } diff --git a/vendor/github.com/open-policy-agent/opa/topdown/runtime.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/runtime.go similarity index 86% rename from vendor/github.com/open-policy-agent/opa/topdown/runtime.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/runtime.go index 7d512f7c00c..dc72fc58181 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/runtime.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/runtime.go @@ -5,19 +5,24 @@ package topdown import ( + "errors" "fmt" - "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/v1/ast" ) +var configStringTerm = ast.StringTerm("config") + +var nothingResolver ast.Resolver = illegalResolver{} + func builtinOPARuntime(bctx BuiltinContext, _ []*ast.Term, iter func(*ast.Term) error) error { if bctx.Runtime == nil { - return iter(ast.ObjectTerm()) + return iter(ast.InternedEmptyObject) } - if bctx.Runtime.Get(ast.StringTerm("config")) != nil { - iface, err := ast.ValueToInterface(bctx.Runtime.Value, illegalResolver{}) + if bctx.Runtime.Get(configStringTerm) != nil { + iface, err := ast.ValueToInterface(bctx.Runtime.Value, nothingResolver) if err != nil { return err } @@ -110,7 +115,7 @@ func removeCryptoKeys(x interface{}) error { func removeKey(x interface{}, keys ...string) error { val, ok := x.(map[string]interface{}) if !ok { - return fmt.Errorf("type assertion error") + return errors.New("type assertion error") } for _, key := range keys { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/save.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/save.go similarity index 96% rename from vendor/github.com/open-policy-agent/opa/topdown/save.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/save.go index 
0468692cc69..439f554a347 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/save.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/save.go @@ -5,7 +5,7 @@ import ( "fmt" "strings" - "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/v1/ast" ) // saveSet contains a stack of terms that are considered 'unknown' during @@ -365,7 +365,13 @@ func saveRequired(c *ast.Compiler, ic *inliningControl, icIgnoreInternal bool, s } switch node := node.(type) { case *ast.Expr: - found = len(node.With) > 0 || ignoreExprDuringPartial(node) + found = len(node.With) > 0 + if found { + return found + } + if !ic.nondeterministicBuiltins { // skip evaluating non-det builtins for PE + found = ignoreExprDuringPartial(node) + } case *ast.Term: switch v := node.Value.(type) { case ast.Var: @@ -422,8 +428,9 @@ func ignoreDuringPartial(bi *ast.Builtin) bool { } type inliningControl struct { - shallow bool - disable []disableInliningFrame + shallow bool + disable []disableInliningFrame + nondeterministicBuiltins bool // evaluate non-det builtins during PE (if args are known) } type disableInliningFrame struct { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/semver.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/semver.go similarity index 85% rename from vendor/github.com/open-policy-agent/opa/topdown/semver.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/semver.go index 7bb7b9c1838..0e7daaeae64 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/semver.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/semver.go @@ -7,9 +7,9 @@ package topdown import ( "fmt" - "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/semver" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) func builtinSemVerCompare(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -34,13 +34,13 @@ func builtinSemVerCompare(_ BuiltinContext, operands []*ast.Term, iter func(*ast result := versionA.Compare(*versionB) - return iter(ast.IntNumberTerm(result)) + return iter(ast.InternedIntNumberTerm(result)) } func builtinSemVerIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { versionString, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } result := true @@ -50,7 +50,7 @@ func builtinSemVerIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast result = false } - return iter(ast.BooleanTerm(result)) + return iter(ast.InternedBooleanTerm(result)) } func init() { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/sets.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/sets.go similarity index 92% rename from vendor/github.com/open-policy-agent/opa/topdown/sets.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/sets.go index a973404f3f5..9df2d328a0f 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/sets.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/sets.go @@ -5,11 +5,11 @@ package topdown import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) -// Deprecated in v0.4.2 in favour of minus/infix "-" operation. 
+// Deprecated: deprecated in v0.4.2 in favour of minus/infix "-" operation. func builtinSetDiff(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { s1, err := builtins.SetOperand(operands[0].Value, 1) diff --git a/vendor/github.com/open-policy-agent/opa/topdown/strings.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/strings.go similarity index 71% rename from vendor/github.com/open-policy-agent/opa/topdown/strings.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/strings.go index d9e4a55e587..3b1a412c393 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/strings.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/strings.go @@ -5,15 +5,20 @@ package topdown import ( + "errors" "fmt" "math/big" "sort" + "strconv" "strings" + "unicode" + "unicode/utf8" "github.com/tchap/go-patricia/v2/patricia" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/util" ) func builtinAnyPrefixMatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -47,7 +52,7 @@ func builtinAnyPrefixMatch(_ BuiltinContext, operands []*ast.Term, iter func(*as return builtins.NewOperandTypeErr(2, b, "string", "set", "array") } - return iter(ast.BooleanTerm(anyStartsWithAny(strs, prefixes))) + return iter(ast.InternedBooleanTerm(anyStartsWithAny(strs, prefixes))) } func builtinAnySuffixMatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -87,7 +92,7 @@ func builtinAnySuffixMatch(_ BuiltinContext, operands []*ast.Term, iter func(*as return builtins.NewOperandTypeErr(2, b, "string", "set", "array") } - return iter(ast.BooleanTerm(anyStartsWithAny(strsReversed, suffixesReversed))) + return iter(ast.InternedBooleanTerm(anyStartsWithAny(strsReversed, suffixesReversed))) } func anyStartsWithAny(strs []string, prefixes []string) bool { @@ -99,11 +104,11 @@ func anyStartsWithAny(strs []string, prefixes []string) bool { } trie := patricia.NewTrie() - for i := 0; i < len(strs); i++ { + for i := range strs { trie.Insert([]byte(strs[i]), true) } - for i := 0; i < len(prefixes); i++ { + for i := range prefixes { if trie.MatchSubtree([]byte(prefixes[i])) { return true } @@ -151,33 +156,48 @@ func builtinConcat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) return err } - strs := []string{} + var strs []string switch b := operands[1].Value.(type) { case *ast.Array: - err := b.Iter(func(x *ast.Term) error { - s, ok := x.Value.(ast.String) + var l int + for i := range b.Len() { + s, ok := b.Elem(i).Value.(ast.String) if !ok { - return builtins.NewOperandElementErr(2, operands[1].Value, x.Value, "string") + return builtins.NewOperandElementErr(2, operands[1].Value, b.Elem(i).Value, "string") } - strs = append(strs, string(s)) - return nil - }) - if err != nil { - return err + l += len(string(s)) + } + + if b.Len() == 1 { + return iter(b.Elem(0)) } + + strs = make([]string, 0, l) + for i := range b.Len() { + strs = append(strs, string(b.Elem(i).Value.(ast.String))) + } + case ast.Set: - err := b.Iter(func(x *ast.Term) error { - s, ok := x.Value.(ast.String) + var l int + terms := b.Slice() + for i := range terms { + s, ok := terms[i].Value.(ast.String) if !ok { - return builtins.NewOperandElementErr(2, operands[1].Value, x.Value, "string") + return builtins.NewOperandElementErr(2, operands[1].Value, terms[i].Value, "string") } - 
strs = append(strs, string(s)) - return nil - }) - if err != nil { - return err + l += len(string(s)) } + + if b.Len() == 1 { + return iter(b.Slice()[0]) + } + + strs = make([]string, 0, l) + for i := range b.Len() { + strs = append(strs, string(terms[i].Value.(ast.String))) + } + default: return builtins.NewOperandTypeErr(2, operands[1].Value, "set", "array") } @@ -208,7 +228,14 @@ func builtinIndexOf(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) return err } if len(string(search)) == 0 { - return fmt.Errorf("empty search character") + return errors.New("empty search character") + } + + if isASCII(string(base)) && isASCII(string(search)) { + // this is a false positive in the indexAlloc rule that thinks + // we're converting byte arrays to strings + //nolint:gocritic + return iter(ast.InternedIntNumberTerm(strings.Index(string(base), string(search)))) } baseRunes := []rune(string(base)) @@ -218,14 +245,14 @@ func builtinIndexOf(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) for i, r := range baseRunes { if len(baseRunes) >= i+searchLen { if r == searchRunes[0] && runesEqual(baseRunes[i:i+searchLen], searchRunes) { - return iter(ast.IntNumberTerm(i)) + return iter(ast.InternedIntNumberTerm(i)) } } else { break } } - return iter(ast.IntNumberTerm(-1)) + return iter(ast.InternedIntNumberTerm(-1)) } func builtinIndexOfN(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -239,7 +266,7 @@ func builtinIndexOfN(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term return err } if len(string(search)) == 0 { - return fmt.Errorf("empty search character") + return errors.New("empty search character") } baseRunes := []rune(string(base)) @@ -250,7 +277,7 @@ func builtinIndexOfN(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term for i, r := range baseRunes { if len(baseRunes) >= i+searchLen { if r == searchRunes[0] && runesEqual(baseRunes[i:i+searchLen], searchRunes) { - arr = append(arr, ast.IntNumberTerm(i)) + arr = append(arr, ast.InternedIntNumberTerm(i)) } } else { break @@ -266,15 +293,10 @@ func builtinSubstring(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Ter if err != nil { return err } - runes := []rune(base) startIndex, err := builtins.IntOperand(operands[1].Value, 2) if err != nil { return err - } else if startIndex >= len(runes) { - return iter(ast.StringTerm("")) - } else if startIndex < 0 { - return fmt.Errorf("negative offset") } length, err := builtins.IntOperand(operands[2].Value, 3) @@ -282,18 +304,68 @@ func builtinSubstring(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Ter return err } - var s ast.String + if startIndex < 0 { + return errors.New("negative offset") + } + + sbase := string(base) + if sbase == "" { + return iter(ast.InternedEmptyString) + } + + // Optimized path for the likely common case of ASCII strings. + // This allocates less memory and runs in about 1/3 the time. 
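// A minimal, self-contained sketch of the ASCII fast path added above: when
// every byte of a string is <= unicode.MaxASCII, each rune occupies exactly
// one byte, so a substring can be taken by slicing bytes directly rather
// than paying for a []rune conversion. Names here (asciiOnly, substr) are
// illustrative stand-ins, not the patch's own identifiers.

package main

import (
	"fmt"
	"unicode"
)

func asciiOnly(s string) bool {
	for i := 0; i < len(s); i++ {
		if s[i] > unicode.MaxASCII {
			return false
		}
	}
	return true
}

func substr(s string, start, length int) string {
	if asciiOnly(s) {
		// Fast path: one byte per rune, slice without allocating.
		if start >= len(s) {
			return ""
		}
		if end := start + length; end < len(s) {
			return s[start:end]
		}
		return s[start:]
	}
	// General case: convert to runes so multi-byte characters survive.
	r := []rune(s)
	if start >= len(r) {
		return ""
	}
	if end := start + length; end < len(r) {
		return string(r[start:end])
	}
	return string(r[start:])
}

func main() {
	fmt.Println(substr("hello", 1, 3)) // "ell" via the byte path
	fmt.Println(substr("héllo", 1, 3)) // "éll" via the rune path
}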
+ if isASCII(sbase) { + if startIndex >= len(sbase) { + return iter(ast.InternedEmptyString) + } + + if length < 0 { + return iter(ast.StringTerm(sbase[startIndex:])) + } + + if startIndex == 0 && length >= len(sbase) { + return iter(operands[0]) + } + + upto := startIndex + length + if len(sbase) < upto { + upto = len(sbase) + } + return iter(ast.StringTerm(sbase[startIndex:upto])) + } + + if startIndex == 0 && length >= utf8.RuneCountInString(sbase) { + return iter(operands[0]) + } + + runes := []rune(base) + + if startIndex >= len(runes) { + return iter(ast.InternedEmptyString) + } + + var s string if length < 0 { - s = ast.String(runes[startIndex:]) + s = string(runes[startIndex:]) } else { upto := startIndex + length if len(runes) < upto { upto = len(runes) } - s = ast.String(runes[startIndex:upto]) + s = string(runes[startIndex:upto]) } - return iter(ast.NewTerm(s)) + return iter(ast.StringTerm(s)) +} + +func isASCII(s string) bool { + for i := range len(s) { + if s[i] > unicode.MaxASCII { + return false + } + } + return true } func builtinContains(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -307,7 +379,7 @@ func builtinContains(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term return err } - return iter(ast.BooleanTerm(strings.Contains(string(s), string(substr)))) + return iter(ast.InternedBooleanTerm(strings.Contains(string(s), string(substr)))) } func builtinStringCount(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -323,10 +395,9 @@ func builtinStringCount(_ BuiltinContext, operands []*ast.Term, iter func(*ast.T baseTerm := string(s) searchTerm := string(substr) - count := strings.Count(baseTerm, searchTerm) - return iter(ast.IntNumberTerm(count)) + return iter(ast.InternedIntNumberTerm(count)) } func builtinStartsWith(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -340,7 +411,7 @@ func builtinStartsWith(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Te return err } - return iter(ast.BooleanTerm(strings.HasPrefix(string(s), string(prefix)))) + return iter(ast.InternedBooleanTerm(strings.HasPrefix(string(s), string(prefix)))) } func builtinEndsWith(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -354,7 +425,7 @@ func builtinEndsWith(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term return err } - return iter(ast.BooleanTerm(strings.HasSuffix(string(s), string(suffix)))) + return iter(ast.InternedBooleanTerm(strings.HasSuffix(string(s), string(suffix)))) } func builtinLower(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -372,7 +443,14 @@ func builtinUpper(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) e return err } - return iter(ast.StringTerm(strings.ToUpper(string(s)))) + arg := string(s) + upp := strings.ToUpper(arg) + + if arg == upp { + return iter(operands[0]) + } + + return iter(ast.StringTerm(upp)) } func builtinSplit(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -380,15 +458,22 @@ func builtinSplit(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) e if err != nil { return err } + d, err := builtins.StringOperand(operands[1].Value, 2) if err != nil { return err } + + if !strings.Contains(string(s), string(d)) { + return iter(ast.ArrayTerm(operands[0])) + } + elems := strings.Split(string(s), string(d)) - arr := make([]*ast.Term, len(elems)) + arr := util.NewPtrSlice[ast.Term](len(elems)) for i := range elems { - arr[i] = 
ast.StringTerm(elems[i]) + arr[i].Value = ast.String(elems[i]) } + return iter(ast.ArrayTerm(arr...)) } @@ -408,7 +493,12 @@ func builtinReplace(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) return err } - return iter(ast.StringTerm(strings.Replace(string(s), string(old), string(n), -1))) + replaced := strings.ReplaceAll(string(s), string(old), string(n)) + if replaced == string(s) { + return iter(operands[0]) + } + + return iter(ast.StringTerm(replaced)) } func builtinReplaceN(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -437,14 +527,8 @@ func builtinReplaceN(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term } oldnewArr = append(oldnewArr, string(keyVal), string(strVal)) } - if err != nil { - return err - } - - r := strings.NewReplacer(oldnewArr...) - replaced := r.Replace(string(s)) - return iter(ast.StringTerm(replaced)) + return iter(ast.StringTerm(strings.NewReplacer(oldnewArr...).Replace(string(s)))) } func builtinTrim(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -458,6 +542,11 @@ func builtinTrim(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) er return err } + trimmed := strings.Trim(string(s), string(c)) + if trimmed == string(s) { + return iter(operands[0]) + } + return iter(ast.StringTerm(strings.Trim(string(s), string(c)))) } @@ -472,7 +561,12 @@ func builtinTrimLeft(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term return err } - return iter(ast.StringTerm(strings.TrimLeft(string(s), string(c)))) + trimmed := strings.TrimLeft(string(s), string(c)) + if trimmed == string(s) { + return iter(operands[0]) + } + + return iter(ast.StringTerm(trimmed)) } func builtinTrimPrefix(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -486,7 +580,12 @@ func builtinTrimPrefix(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Te return err } - return iter(ast.StringTerm(strings.TrimPrefix(string(s), string(pre)))) + trimmed := strings.TrimPrefix(string(s), string(pre)) + if trimmed == string(s) { + return iter(operands[0]) + } + + return iter(ast.StringTerm(trimmed)) } func builtinTrimRight(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -500,7 +599,12 @@ func builtinTrimRight(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Ter return err } - return iter(ast.StringTerm(strings.TrimRight(string(s), string(c)))) + trimmed := strings.TrimRight(string(s), string(c)) + if trimmed == string(s) { + return iter(operands[0]) + } + + return iter(ast.StringTerm(trimmed)) } func builtinTrimSuffix(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -514,7 +618,12 @@ func builtinTrimSuffix(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Te return err } - return iter(ast.StringTerm(strings.TrimSuffix(string(s), string(suf)))) + trimmed := strings.TrimSuffix(string(s), string(suf)) + if trimmed == string(s) { + return iter(operands[0]) + } + + return iter(ast.StringTerm(trimmed)) } func builtinTrimSpace(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -523,7 +632,12 @@ func builtinTrimSpace(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Ter return err } - return iter(ast.StringTerm(strings.TrimSpace(string(s)))) + trimmed := strings.TrimSpace(string(s)) + if trimmed == string(s) { + return iter(operands[0]) + } + + return iter(ast.StringTerm(trimmed)) } func builtinSprintf(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ 
-537,7 +651,17 @@ func builtinSprintf(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) return builtins.NewOperandTypeErr(2, operands[1].Value, "array") } - args := make([]interface{}, astArr.Len()) + // Optimized path for where sprintf is used as a "to_string" function for + // a single integer, i.e. sprintf("%d", [x]) where x is an integer. + if s == "%d" && astArr.Len() == 1 { + if n, ok := astArr.Elem(0).Value.(ast.Number); ok { + if i, ok := n.Int(); ok { + return iter(ast.InternedStringTerm(strconv.Itoa(i))) + } + } + } + + args := make([]any, astArr.Len()) for i := range args { switch v := astArr.Elem(i).Value.(type) { @@ -571,15 +695,23 @@ func builtinReverse(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) } func reverseString(str string) string { - sRunes := []rune(str) - length := len(sRunes) - reversedRunes := make([]rune, length) + var buf []byte + var arr [255]byte + size := len(str) + + if size < 255 { + buf = arr[:size:size] + } else { + buf = make([]byte, size) + } - for index, r := range sRunes { - reversedRunes[length-index-1] = r + for start := 0; start < size; { + r, n := utf8.DecodeRuneInString(str[start:]) + start += n + utf8.EncodeRune(buf[size-start:], r) } - return string(reversedRunes) + return string(buf) } func init() { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/subset.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/subset.go similarity index 82% rename from vendor/github.com/open-policy-agent/opa/topdown/subset.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/subset.go index 7b152a5ef97..29354d9730c 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/subset.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/subset.go @@ -5,8 +5,8 @@ package topdown import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) func bothObjects(t1, t2 *ast.Term) (bool, ast.Object, ast.Object) { @@ -88,9 +88,8 @@ func arraySet(t1, t2 *ast.Term) (bool, *ast.Array, ast.Set) { // associated with a key. func objectSubset(super ast.Object, sub ast.Object) bool { var superTerm *ast.Term - isSubset := true - sub.Until(func(key, subTerm *ast.Term) bool { + notSubset := sub.Until(func(key, subTerm *ast.Term) bool { // This really wants to be a for loop, hence the somewhat // weird internal structure. However, using Until() in this // was is a performance optimization, as it avoids performing @@ -98,10 +97,9 @@ func objectSubset(super ast.Object, sub ast.Object) bool { superTerm = super.Get(key) - // subTerm is can't be nil because we got it from Until(), so + // subTerm can't be nil because we got it from Until(), so // we only need to verify that super is non-nil. if superTerm == nil { - isSubset = false return true // break, not a subset } @@ -114,58 +112,39 @@ func objectSubset(super ast.Object, sub ast.Object) bool { // them normally. If only one term is an object, then we // do a normal comparison which will come up false. 
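// A standalone sketch of the Until-as-break pattern the rewrite above leans
// on: Until visits elements until the callback returns true, so returning
// the negated recursive result both stops iteration early and records that
// a counterexample was found. The intSet type below is a simplified
// stand-in for ast.Set/ast.Object, not OPA code.

package main

import "fmt"

type intSet []int

// until mirrors ast.Set.Until: it stops at the first element for which f
// returns true, and reports whether it stopped early.
func (s intSet) until(f func(int) bool) bool {
	for _, v := range s {
		if f(v) {
			return true
		}
	}
	return false
}

func (s intSet) contains(v int) bool {
	for _, x := range s {
		if x == v {
			return true
		}
	}
	return false
}

func subset(super, sub intSet) bool {
	// true from until means we broke early, i.e. found a missing element.
	notSubset := sub.until(func(v int) bool { return !super.contains(v) })
	return !notSubset
}

func main() {
	fmt.Println(subset(intSet{1, 2, 3}, intSet{1, 3})) // true
	fmt.Println(subset(intSet{1, 2}, intSet{3}))       // false
}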
if ok, superObj, subObj := bothObjects(superTerm, subTerm); ok { - if !objectSubset(superObj, subObj) { - isSubset = false - return true // break, not a subset - } - - return false // continue + return !objectSubset(superObj, subObj) } if ok, superSet, subSet := bothSets(superTerm, subTerm); ok { - if !setSubset(superSet, subSet) { - isSubset = false - return true // break, not a subset - } - - return false // continue + return !setSubset(superSet, subSet) } if ok, superArray, subArray := bothArrays(superTerm, subTerm); ok { - if !arraySubset(superArray, subArray) { - isSubset = false - return true // break, not a subset - } - - return false // continue + return !arraySubset(superArray, subArray) } // We have already checked for exact equality, as well as for // all of the types of nested subsets we care about, so if we // get here it means this isn't a subset. - isSubset = false return true // break, not a subset }) - return isSubset + return !notSubset } // setSubset implements the subset operation on sets. // // Unlike in the object case, this is not recursive, we just compare values -// using ast.Set.Contains() because we have no well defined way to "match up" +// using ast.Set.Contains() because we have no well-defined way to "match up" // objects that are in different sets. func setSubset(super ast.Set, sub ast.Set) bool { - isSubset := true - sub.Until(func(t *ast.Term) bool { - if !super.Contains(t) { - isSubset = false - return true + for _, elem := range sub.Slice() { + if !super.Contains(elem) { + return false } - return false - }) + } - return isSubset + return true } // arraySubset implements the subset operation on arrays. @@ -197,12 +176,12 @@ func arraySubset(super, sub *ast.Array) bool { return false } - subElem := sub.Elem(subCursor) superElem := super.Elem(superCursor + subCursor) if superElem == nil { return false } + subElem := sub.Elem(subCursor) if superElem.Value.Compare(subElem.Value) == 0 { subCursor++ } else { @@ -237,22 +216,22 @@ func builtinObjectSubset(_ BuiltinContext, operands []*ast.Term, iter func(*ast. if ok, superObj, subObj := bothObjects(superTerm, subTerm); ok { // Both operands are objects. - return iter(ast.BooleanTerm(objectSubset(superObj, subObj))) + return iter(ast.InternedBooleanTerm(objectSubset(superObj, subObj))) } if ok, superSet, subSet := bothSets(superTerm, subTerm); ok { // Both operands are sets. - return iter(ast.BooleanTerm(setSubset(superSet, subSet))) + return iter(ast.InternedBooleanTerm(setSubset(superSet, subSet))) } if ok, superArray, subArray := bothArrays(superTerm, subTerm); ok { // Both operands are sets. 
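// A minimal sketch of what swapping ast.BooleanTerm for
// ast.InternedBooleanTerm in these returns buys: interned terms are
// package-level singletons, so hot builtins hand back a shared pointer
// instead of heap-allocating a fresh term on every call. The term type
// below is a toy stand-in for ast.Term.

package main

import "fmt"

type term struct{ value bool }

var (
	internedTrue  = &term{value: true}
	internedFalse = &term{value: false}
)

// internedBooleanTerm returns one of two shared terms; no allocation.
func internedBooleanTerm(b bool) *term {
	if b {
		return internedTrue
	}
	return internedFalse
}

func main() {
	a, b := internedBooleanTerm(true), internedBooleanTerm(true)
	fmt.Println(a == b) // true: same pointer every call
}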
- return iter(ast.BooleanTerm(arraySubset(superArray, subArray))) + return iter(ast.InternedBooleanTerm(arraySubset(superArray, subArray))) } if ok, superArray, subSet := arraySet(superTerm, subTerm); ok { // Super operand is array and sub operand is set - return iter(ast.BooleanTerm(arraySetSubset(superArray, subSet))) + return iter(ast.InternedBooleanTerm(arraySetSubset(superArray, subSet))) } return builtins.ErrOperand("both arguments object.subset must be of the same type or array and set") diff --git a/vendor/github.com/open-policy-agent/opa/topdown/template.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/template.go similarity index 90% rename from vendor/github.com/open-policy-agent/opa/topdown/template.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/template.go index cf42477ee8c..cf4635559d1 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/template.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/template.go @@ -4,8 +4,8 @@ import ( "bytes" "text/template" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) func renderTemplate(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/test.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/test.go new file mode 100644 index 00000000000..02958d22642 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/test.go @@ -0,0 +1,30 @@ +// Copyright 2025 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import "github.com/open-policy-agent/opa/v1/ast" + +const TestCaseOp Op = "TestCase" + +func builtinTestCase(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + e := &Event{ + Op: TestCaseOp, + QueryID: bctx.QueryID, + Node: ast.NewExpr([]*ast.Term{ + ast.NewTerm(ast.InternalTestCase.Ref()), + ast.NewTerm(operands[0].Value), + }), + } + + for _, tracer := range bctx.QueryTracers { + tracer.TraceEvent(*e) + } + + return iter(ast.BooleanTerm(true)) +} + +func init() { + RegisterBuiltinFunc(ast.InternalTestCase.Name, builtinTestCase) +} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/time.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/time.go similarity index 92% rename from vendor/github.com/open-policy-agent/opa/topdown/time.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/time.go index ba3efc75dc0..8d2d9b27a2a 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/time.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/time.go @@ -6,7 +6,7 @@ package topdown import ( "encoding/json" - "fmt" + "errors" "math" "math/big" "strconv" @@ -14,8 +14,8 @@ import ( "time" _ "time/tzdata" // this is needed to have LoadLocation when no filesystem tzdata is available - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) var tzCache map[string]*time.Location @@ -29,7 +29,7 @@ var maxDateAllowedForNsConversion = time.Unix(0, math.MaxInt64) func toSafeUnixNano(t time.Time, iter func(*ast.Term) error) error { if t.Before(minDateAllowedForNsConversion) || t.After(maxDateAllowedForNsConversion) { - return fmt.Errorf("time outside of 
valid range") + return errors.New("time outside of valid range") } return iter(ast.NewTerm(ast.Number(int64ToJSONNumber(t.UnixNano())))) @@ -127,7 +127,7 @@ func builtinDate(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) er return err } year, month, day := t.Date() - result := ast.NewArray(ast.IntNumberTerm(year), ast.IntNumberTerm(int(month)), ast.IntNumberTerm(day)) + result := ast.NewArray(ast.InternedIntNumberTerm(year), ast.InternedIntNumberTerm(int(month)), ast.InternedIntNumberTerm(day)) return iter(ast.NewTerm(result)) } @@ -137,7 +137,7 @@ func builtinClock(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) e return err } hour, minute, second := t.Clock() - result := ast.NewArray(ast.IntNumberTerm(hour), ast.IntNumberTerm(minute), ast.IntNumberTerm(second)) + result := ast.NewArray(ast.InternedIntNumberTerm(hour), ast.InternedIntNumberTerm(minute), ast.InternedIntNumberTerm(second)) return iter(ast.NewTerm(result)) } @@ -238,8 +238,8 @@ func builtinDiff(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) er } // END REDISTRIBUTION FROM APACHE 2.0 LICENSED PROJECT - return iter(ast.ArrayTerm(ast.IntNumberTerm(year), ast.IntNumberTerm(month), ast.IntNumberTerm(day), - ast.IntNumberTerm(hour), ast.IntNumberTerm(min), ast.IntNumberTerm(sec))) + return iter(ast.ArrayTerm(ast.InternedIntNumberTerm(year), ast.InternedIntNumberTerm(month), ast.InternedIntNumberTerm(day), + ast.InternedIntNumberTerm(hour), ast.InternedIntNumberTerm(min), ast.InternedIntNumberTerm(sec))) } func tzTime(a ast.Value) (t time.Time, lay string, err error) { @@ -313,7 +313,7 @@ func tzTime(a ast.Value) (t time.Time, lay string, err error) { f := builtins.NumberToFloat(value) i64, acc := f.Int64() if acc != big.Exact { - return time.Time{}, layout, fmt.Errorf("timestamp too big") + return time.Time{}, layout, errors.New("timestamp too big") } t = time.Unix(0, i64).In(loc) diff --git a/vendor/github.com/open-policy-agent/opa/topdown/tokens.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/tokens.go similarity index 74% rename from vendor/github.com/open-policy-agent/opa/topdown/tokens.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/tokens.go index 7457f1f15d5..2050e82d630 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/tokens.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/tokens.go @@ -21,11 +21,12 @@ import ( "math/big" "strings" - "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/jwx/jwa" "github.com/open-policy-agent/opa/internal/jwx/jwk" "github.com/open-policy-agent/opa/internal/jwx/jws" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/cache" ) var ( @@ -129,8 +130,8 @@ func builtinJWTDecode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Ter } // Implements RS256 JWT signature verification -func builtinJWTVerifyRS256(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerifyRSA(operands[0].Value, operands[1].Value, sha256.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { +func builtinJWTVerifyRS256(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerifyRSA(bctx, operands[0].Value, operands[1].Value, sha256.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { return rsa.VerifyPKCS1v15( 
publicKey, crypto.SHA256, @@ -144,8 +145,8 @@ func builtinJWTVerifyRS256(_ BuiltinContext, operands []*ast.Term, iter func(*as } // Implements RS384 JWT signature verification -func builtinJWTVerifyRS384(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerifyRSA(operands[0].Value, operands[1].Value, sha512.New384, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { +func builtinJWTVerifyRS384(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerifyRSA(bctx, operands[0].Value, operands[1].Value, sha512.New384, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { return rsa.VerifyPKCS1v15( publicKey, crypto.SHA384, @@ -159,8 +160,8 @@ func builtinJWTVerifyRS384(_ BuiltinContext, operands []*ast.Term, iter func(*as } // Implements RS512 JWT signature verification -func builtinJWTVerifyRS512(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerifyRSA(operands[0].Value, operands[1].Value, sha512.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { +func builtinJWTVerifyRS512(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerifyRSA(bctx, operands[0].Value, operands[1].Value, sha512.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { return rsa.VerifyPKCS1v15( publicKey, crypto.SHA512, @@ -174,8 +175,8 @@ func builtinJWTVerifyRS512(_ BuiltinContext, operands []*ast.Term, iter func(*as } // Implements PS256 JWT signature verification -func builtinJWTVerifyPS256(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerifyRSA(operands[0].Value, operands[1].Value, sha256.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { +func builtinJWTVerifyPS256(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerifyRSA(bctx, operands[0].Value, operands[1].Value, sha256.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { return rsa.VerifyPSS( publicKey, crypto.SHA256, @@ -190,8 +191,8 @@ func builtinJWTVerifyPS256(_ BuiltinContext, operands []*ast.Term, iter func(*as } // Implements PS384 JWT signature verification -func builtinJWTVerifyPS384(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerifyRSA(operands[0].Value, operands[1].Value, sha512.New384, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { +func builtinJWTVerifyPS384(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerifyRSA(bctx, operands[0].Value, operands[1].Value, sha512.New384, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { return rsa.VerifyPSS( publicKey, crypto.SHA384, @@ -206,8 +207,8 @@ func builtinJWTVerifyPS384(_ BuiltinContext, operands []*ast.Term, iter func(*as } // Implements PS512 JWT signature verification -func builtinJWTVerifyPS512(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerifyRSA(operands[0].Value, operands[1].Value, sha512.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { +func builtinJWTVerifyPS512(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerifyRSA(bctx, 
operands[0].Value, operands[1].Value, sha512.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { return rsa.VerifyPSS( publicKey, crypto.SHA512, @@ -222,19 +223,19 @@ func builtinJWTVerifyPS512(_ BuiltinContext, operands []*ast.Term, iter func(*as } // Implements RSA JWT signature verification. -func builtinJWTVerifyRSA(a ast.Value, b ast.Value, hasher func() hash.Hash, verify func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error) (ast.Value, error) { - return builtinJWTVerify(a, b, hasher, func(publicKey interface{}, digest []byte, signature []byte) error { +func builtinJWTVerifyRSA(bctx BuiltinContext, jwt ast.Value, keyStr ast.Value, hasher func() hash.Hash, verify func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error) (ast.Value, error) { + return builtinJWTVerify(bctx, jwt, keyStr, hasher, func(publicKey interface{}, digest []byte, signature []byte) error { publicKeyRsa, ok := publicKey.(*rsa.PublicKey) if !ok { - return fmt.Errorf("incorrect public key type") + return errors.New("incorrect public key type") } return verify(publicKeyRsa, digest, signature) }) } // Implements ES256 JWT signature verification. -func builtinJWTVerifyES256(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerify(operands[0].Value, operands[1].Value, sha256.New, verifyES) +func builtinJWTVerifyES256(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerify(bctx, operands[0].Value, operands[1].Value, sha256.New, verifyES) if err == nil { return iter(ast.NewTerm(result)) } @@ -242,8 +243,8 @@ func builtinJWTVerifyES256(_ BuiltinContext, operands []*ast.Term, iter func(*as } // Implements ES384 JWT signature verification -func builtinJWTVerifyES384(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerify(operands[0].Value, operands[1].Value, sha512.New384, verifyES) +func builtinJWTVerifyES384(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerify(bctx, operands[0].Value, operands[1].Value, sha512.New384, verifyES) if err == nil { return iter(ast.NewTerm(result)) } @@ -251,8 +252,8 @@ func builtinJWTVerifyES384(_ BuiltinContext, operands []*ast.Term, iter func(*as } // Implements ES512 JWT signature verification -func builtinJWTVerifyES512(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerify(operands[0].Value, operands[1].Value, sha512.New, verifyES) +func builtinJWTVerifyES512(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerify(bctx, operands[0].Value, operands[1].Value, sha512.New, verifyES) if err == nil { return iter(ast.NewTerm(result)) } @@ -267,7 +268,7 @@ func verifyES(publicKey interface{}, digest []byte, signature []byte) (err error }() publicKeyEcdsa, ok := publicKey.(*ecdsa.PublicKey) if !ok { - return fmt.Errorf("incorrect public key type") + return errors.New("incorrect public key type") } r, s := &big.Int{}, &big.Int{} n := len(signature) / 2 @@ -276,7 +277,7 @@ func verifyES(publicKey interface{}, digest []byte, signature []byte) (err error if ecdsa.Verify(publicKeyEcdsa, digest, r, s) { return nil } - return fmt.Errorf("ECDSA signature verification error") + return errors.New("ECDSA signature verification error") } type verificationKey struct { @@ -291,7 +292,7 @@ type verificationKey struct { func 
getKeysFromCertOrJWK(certificate string) ([]verificationKey, error) { if block, rest := pem.Decode([]byte(certificate)); block != nil { if len(rest) > 0 { - return nil, fmt.Errorf("extra data after a PEM certificate block") + return nil, errors.New("extra data after a PEM certificate block") } if block.Type == blockTypeCertificate { @@ -311,7 +312,7 @@ func getKeysFromCertOrJWK(certificate string) ([]verificationKey, error) { return []verificationKey{{key: key}}, nil } - return nil, fmt.Errorf("failed to extract a Key from the PEM certificate") + return nil, errors.New("failed to extract a Key from the PEM certificate") } jwks, err := jwk.ParseString(certificate) @@ -345,13 +346,17 @@ func getKeyByKid(kid string, keys []verificationKey) *verificationKey { } // Implements JWT signature verification. -func builtinJWTVerify(a ast.Value, b ast.Value, hasher func() hash.Hash, verify func(publicKey interface{}, digest []byte, signature []byte) error) (ast.Value, error) { - token, err := decodeJWT(a) +func builtinJWTVerify(bctx BuiltinContext, jwt ast.Value, keyStr ast.Value, hasher func() hash.Hash, verify func(publicKey interface{}, digest []byte, signature []byte) error) (ast.Value, error) { + if found, _, _, valid := getTokenFromCache(bctx, jwt, keyStr); found { + return ast.Boolean(valid), nil + } + + token, err := decodeJWT(jwt) if err != nil { return nil, err } - s, err := builtins.StringOperand(b, 2) + s, err := builtins.StringOperand(keyStr, 2) if err != nil { return nil, err } @@ -375,6 +380,11 @@ func builtinJWTVerify(a ast.Value, b ast.Value, hasher func() hash.Hash, verify return nil, err } + done := func(valid bool) (ast.Boolean, error) { + putTokenInCache(bctx, jwt, keyStr, nil, nil, valid) + return ast.Boolean(valid), nil + } + // Validate the JWT signature // First, check if there's a matching key ID (`kid`) in both token header and key(s). @@ -383,7 +393,7 @@ func builtinJWTVerify(a ast.Value, b ast.Value, hasher func() hash.Hash, verify if key := getKeyByKid(header.kid, keys); key != nil { err = verify(key.key, getInputSHA([]byte(token.header+"."+token.payload), hasher), []byte(signature)) - return ast.Boolean(err == nil), nil + return done(err == nil) } } @@ -395,7 +405,7 @@ func builtinJWTVerify(a ast.Value, b ast.Value, hasher func() hash.Hash, verify // we'll need to verify to find out err = verify(key.key, getInputSHA([]byte(token.header+"."+token.payload), hasher), []byte(signature)) if err == nil { - return ast.Boolean(true), nil + return done(true) } } else { if header.alg != key.alg { @@ -403,48 +413,32 @@ func builtinJWTVerify(a ast.Value, b ast.Value, hasher func() hash.Hash, verify } err = verify(key.key, getInputSHA([]byte(token.header+"."+token.payload), hasher), []byte(signature)) if err == nil { - return ast.Boolean(true), nil + return done(true) } } } // None of the keys worked, return false - return ast.Boolean(false), nil + return done(false) } // Implements HS256 (secret) JWT signature verification -func builtinJWTVerifyHS256(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - // Decode the JSON Web Token - token, err := decodeJWT(operands[0].Value) - if err != nil { - return err - } - - // Process Secret input - astSecret, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - secret := string(astSecret) - - mac := hmac.New(sha256.New, []byte(secret)) - _, err = mac.Write([]byte(token.header + "." 
+ token.payload)) - if err != nil { - return err - } +func builtinJWTVerifyHS256(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + return builtinJWTVerifyHS(bctx, operands, sha256.New, iter) +} - signature, err := token.decodeSignature() - if err != nil { - return err - } +// Implements HS384 JWT signature verification +func builtinJWTVerifyHS384(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + return builtinJWTVerifyHS(bctx, operands, sha512.New384, iter) +} - return iter(ast.NewTerm(ast.Boolean(hmac.Equal([]byte(signature), mac.Sum(nil))))) +// Implements HS512 JWT signature verification +func builtinJWTVerifyHS512(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + return builtinJWTVerifyHS(bctx, operands, sha512.New, iter) } -// Implements HS384 JWT signature verification -func builtinJWTVerifyHS384(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - // Decode the JSON Web Token - token, err := decodeJWT(operands[0].Value) +func builtinJWTVerifyHS(bctx BuiltinContext, operands []*ast.Term, hashF func() hash.Hash, iter func(*ast.Term) error) error { + jwt, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { return err } @@ -454,38 +448,20 @@ func builtinJWTVerifyHS384(_ BuiltinContext, operands []*ast.Term, iter func(*as if err != nil { return err } - secret := string(astSecret) - - mac := hmac.New(sha512.New384, []byte(secret)) - _, err = mac.Write([]byte(token.header + "." + token.payload)) - if err != nil { - return err - } - signature, err := token.decodeSignature() - if err != nil { - return err + if found, _, _, valid := getTokenFromCache(bctx, jwt, astSecret); found { + return iter(ast.NewTerm(ast.Boolean(valid))) } - return iter(ast.NewTerm(ast.Boolean(hmac.Equal([]byte(signature), mac.Sum(nil))))) -} - -// Implements HS512 JWT signature verification -func builtinJWTVerifyHS512(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { // Decode the JSON Web Token - token, err := decodeJWT(operands[0].Value) + token, err := decodeJWT(jwt) if err != nil { return err } - // Process Secret input - astSecret, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } secret := string(astSecret) - mac := hmac.New(sha512.New, []byte(secret)) + mac := hmac.New(hashF, []byte(secret)) _, err = mac.Write([]byte(token.header + "." 
+ token.payload)) if err != nil { return err @@ -496,7 +472,11 @@ func builtinJWTVerifyHS512(_ BuiltinContext, operands []*ast.Term, iter func(*as return err } - return iter(ast.NewTerm(ast.Boolean(hmac.Equal([]byte(signature), mac.Sum(nil))))) + valid := hmac.Equal([]byte(signature), mac.Sum(nil)) + + putTokenInCache(bctx, jwt, astSecret, nil, nil, valid) + + return iter(ast.NewTerm(ast.Boolean(valid))) } // -- Full JWT verification and decoding -- @@ -553,7 +533,7 @@ var tokenConstraintTypes = map[string]tokenConstraintHandler{ func tokenConstraintCert(value ast.Value, constraints *tokenConstraints) error { s, ok := value.(ast.String) if !ok { - return fmt.Errorf("cert constraint: must be a string") + return errors.New("cert constraint: must be a string") } keys, err := getKeysFromCertOrJWK(string(s)) @@ -578,14 +558,14 @@ func tokenConstraintTime(value ast.Value, constraints *tokenConstraints) error { func timeFromValue(value ast.Value) (float64, error) { time, ok := value.(ast.Number) if !ok { - return 0, fmt.Errorf("token time constraint: must be a number") + return 0, errors.New("token time constraint: must be a number") } timeFloat, ok := time.Float64() if !ok { - return 0, fmt.Errorf("token time constraint: unvalid float64") + return 0, errors.New("token time constraint: unvalid float64") } if timeFloat < 0 { - return 0, fmt.Errorf("token time constraint: must not be negative") + return 0, errors.New("token time constraint: must not be negative") } return timeFloat, nil } @@ -636,10 +616,10 @@ func (constraints *tokenConstraints) validate() error { keys++ } if keys > 1 { - return fmt.Errorf("duplicate key constraints") + return errors.New("duplicate key constraints") } if keys < 1 { - return fmt.Errorf("no key constraint") + return errors.New("no key constraint") } return nil } @@ -753,7 +733,7 @@ var errSignatureNotVerified = errors.New("signature not verified") func verifyHMAC(key interface{}, hash crypto.Hash, payload []byte, signature []byte) error { macKey, ok := key.([]byte) if !ok { - return fmt.Errorf("incorrect symmetric key type") + return errors.New("incorrect symmetric key type") } mac := hmac.New(hash.New, macKey) if _, err := mac.Write(payload); err != nil { @@ -776,7 +756,7 @@ func verifyAsymmetric(verify tokenVerifyAsymmetricFunction) tokenVerifyFunction func verifyRSAPKCS(key interface{}, hash crypto.Hash, digest []byte, signature []byte) error { publicKeyRsa, ok := key.(*rsa.PublicKey) if !ok { - return fmt.Errorf("incorrect public key type") + return errors.New("incorrect public key type") } if err := rsa.VerifyPKCS1v15(publicKeyRsa, hash, digest, signature); err != nil { return errSignatureNotVerified @@ -787,7 +767,7 @@ func verifyRSAPKCS(key interface{}, hash crypto.Hash, digest []byte, signature [ func verifyRSAPSS(key interface{}, hash crypto.Hash, digest []byte, signature []byte) error { publicKeyRsa, ok := key.(*rsa.PublicKey) if !ok { - return fmt.Errorf("incorrect public key type") + return errors.New("incorrect public key type") } if err := rsa.VerifyPSS(publicKeyRsa, hash, digest, signature, nil); err != nil { return errSignatureNotVerified @@ -803,7 +783,7 @@ func verifyECDSA(key interface{}, _ crypto.Hash, digest []byte, signature []byte }() publicKeyEcdsa, ok := key.(*ecdsa.PublicKey) if !ok { - return fmt.Errorf("incorrect public key type") + return errors.New("incorrect public key type") } r, s := &big.Int{}, &big.Int{} n := len(signature) / 2 @@ -852,19 +832,19 @@ var tokenHeaderTypes = map[string]tokenHeaderHandler{ func tokenHeaderCrit(header 
*tokenHeader, value ast.Value) error { v, ok := value.(*ast.Array) if !ok { - return fmt.Errorf("crit: must be a list") + return errors.New("crit: must be a list") } header.crit = map[string]bool{} _ = v.Iter(func(elem *ast.Term) error { tv, ok := elem.Value.(ast.String) if !ok { - return fmt.Errorf("crit: must be a list of strings") + return errors.New("crit: must be a list of strings") } header.crit[string(tv)] = true return nil }) if len(header.crit) == 0 { - return fmt.Errorf("crit: must be a nonempty list") // 'MUST NOT' use the empty list + return errors.New("crit: must be a nonempty list") // 'MUST NOT' use the empty list } return nil } @@ -923,7 +903,7 @@ func commonBuiltinJWTEncodeSign(bctx BuiltinContext, inputHeaders, jwsPayload, j return err } if jwk.GetKeyTypeFromKey(key) != keys.Keys[0].GetKeyType() { - return fmt.Errorf("JWK derived key type and keyType parameter do not match") + return errors.New("JWK derived key type and keyType parameter do not match") } standardHeaders := &jws.StandardHeaders{} @@ -934,11 +914,11 @@ func commonBuiltinJWTEncodeSign(bctx BuiltinContext, inputHeaders, jwsPayload, j } alg := standardHeaders.GetAlgorithm() if alg == jwa.Unsupported { - return fmt.Errorf("unknown signature algorithm") + return errors.New("unknown signature algorithm") } if (standardHeaders.Type == "" || standardHeaders.Type == headerJwt) && !json.Valid([]byte(jwsPayload)) { - return fmt.Errorf("type is JWT but payload is not JSON") + return errors.New("type is JWT but payload is not JSON") } // process payload and sign @@ -1024,7 +1004,7 @@ func builtinJWTDecodeVerify(bctx BuiltinContext, operands []*ast.Term, iter func } unverified := ast.ArrayTerm( - ast.BooleanTerm(false), + ast.InternedBooleanTerm(false), ast.NewTerm(ast.NewObject()), ast.NewTerm(ast.NewObject()), ) @@ -1036,57 +1016,115 @@ func builtinJWTDecodeVerify(bctx BuiltinContext, operands []*ast.Term, iter func return err } var token *JSONWebToken - var p *ast.Term - for { - // RFC7519 7.2 #1-2 split into parts - if token, err = decodeJWT(a); err != nil { - return err - } - // RFC7519 7.2 #3, #4, #6 - if err := token.decodeHeader(); err != nil { - return err - } - // RFC7159 7.2 #5 (and RFC7159 5.2 #5) validate header fields - header, err := parseTokenHeader(token) - if err != nil { - return err - } - if !header.valid() { + var payload ast.Object + var header ast.Object + + // FIXME: optimize + k, _ := b.Filter(ast.NewObject( + ast.Item(ast.StringTerm("secret"), ast.ObjectTerm()), + ast.Item(ast.StringTerm("cert"), ast.ObjectTerm()), + )) + + if found, th, tp, validSignature := getTokenFromCache(bctx, a, k); found { + if !validSignature { + // For the given token and key(s), the signature is invalid return iter(unverified) } - // Check constraints that impact signature verification. 
- if constraints.alg != "" && constraints.alg != header.alg { - return iter(unverified) - } - // RFC7159 7.2 #7 verify the signature - signature, err := token.decodeSignature() - if err != nil { - return err + + if th != nil && tp != nil { + header = th + payload = tp + } else { + // Cache entry was created by one of the other built-ins that doesn't decode header/payload + + if token, err = decodeJWT(a); err != nil { + return err + } + + header = token.decodedHeader + + p, err := getResult(builtinBase64UrlDecode, ast.StringTerm(token.payload)) + if err != nil { + return fmt.Errorf("JWT payload had invalid encoding: %v", err) + } + + payload, err = extractJSONObject(string(p.Value.(ast.String))) + if err != nil { + return err + } + + putTokenInCache(bctx, a, k, header, payload, true) } - if err := constraints.verify(header.kid, header.alg, token.header, token.payload, signature); err != nil { - if err == errSignatureNotVerified { + } else { + var p *ast.Term + + for { + // RFC7519 7.2 #1-2 split into parts + if token, err = decodeJWT(a); err != nil { + return err + } + + // RFC7519 7.2 #3, #4, #6 + if err := token.decodeHeader(); err != nil { + return err + } + + // RFC7159 7.2 #5 (and RFC7159 5.2 #5) validate header fields + header, err := parseTokenHeader(token) + if err != nil { + return err + } + + if !header.valid() { return iter(unverified) } - return err + + // Check constraints that impact signature verification. + if constraints.alg != "" && constraints.alg != header.alg { + return iter(unverified) + } + + // RFC7159 7.2 #7 verify the signature + signature, err := token.decodeSignature() + if err != nil { + return err + } + + if err := constraints.verify(header.kid, header.alg, token.header, token.payload, signature); err != nil { + if err == errSignatureNotVerified { + putTokenInCache(bctx, a, k, nil, nil, false) + return iter(unverified) + } + return err + } + + // RFC7159 7.2 #9-10 decode the payload + p, err = getResult(builtinBase64UrlDecode, ast.StringTerm(token.payload)) + if err != nil { + return fmt.Errorf("JWT payload had invalid encoding: %v", err) + } + + // RFC7159 7.2 #8 and 5.2 cty + if strings.EqualFold(header.cty, headerJwt) { + // Nested JWT, go round again with payload as first argument + a = p.Value + continue + } + + // Non-nested JWT (or we've reached the bottom of the nesting). + break } - // RFC7159 7.2 #9-10 decode the payload - p, err = getResult(builtinBase64UrlDecode, ast.StringTerm(token.payload)) + + payload, err = extractJSONObject(string(p.Value.(ast.String))) if err != nil { - return fmt.Errorf("JWT payload had invalid encoding: %v", err) - } - // RFC7159 7.2 #8 and 5.2 cty - if strings.ToUpper(header.cty) == headerJwt { - // Nested JWT, go round again with payload as first argument - a = p.Value - continue + return err } - // Non-nested JWT (or we've reached the bottom of the nesting). 
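// A toy sketch of the nested-JWT loop being restructured here: while the
// header's cty field says the payload is itself a JWT, verification goes
// round again with the payload as the new input, and only breaks once a
// non-JWT payload is reached. The struct below is a simplified stand-in;
// real tokens are base64url-encoded and signed.

package main

import "fmt"

type token struct {
	cty     string // content type header; "JWT" marks a nested token
	payload any    // either a nested *token or the final claims
}

func unwrap(t *token) any {
	for {
		if t.cty == "JWT" {
			// Nested JWT: go round again with payload as the input.
			t = t.payload.(*token)
			continue
		}
		// Non-nested (or bottom of the nesting): stop here.
		return t.payload
	}
}

func main() {
	inner := &token{payload: "claims"}
	outer := &token{cty: "JWT", payload: inner}
	fmt.Println(unwrap(outer)) // claims
}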
- break - } - payload, err := extractJSONObject(string(p.Value.(ast.String))) - if err != nil { - return err + + header = token.decodedHeader + + putTokenInCache(bctx, a, k, header, payload, true) } + // Check registered claim names against constraints or environment // RFC7159 4.1.1 iss if constraints.iss != "" { @@ -1111,34 +1149,34 @@ func builtinJWTDecodeVerify(bctx BuiltinContext, operands []*ast.Term, iter func } // RFC7159 4.1.4 exp if exp := payload.Get(jwtExpKey); exp != nil { - switch exp.Value.(type) { + switch v := exp.Value.(type) { case ast.Number: // constraints.time is in nanoseconds but exp Value is in seconds compareTime := ast.FloatNumberTerm(constraints.time / 1000000000) - if ast.Compare(compareTime, exp.Value.(ast.Number)) != -1 { + if ast.Compare(compareTime, v) != -1 { return iter(unverified) } default: - return fmt.Errorf("exp value must be a number") + return errors.New("exp value must be a number") } } // RFC7159 4.1.5 nbf if nbf := payload.Get(jwtNbfKey); nbf != nil { - switch nbf.Value.(type) { + switch v := nbf.Value.(type) { case ast.Number: // constraints.time is in nanoseconds but nbf Value is in seconds compareTime := ast.FloatNumberTerm(constraints.time / 1000000000) - if ast.Compare(compareTime, nbf.Value.(ast.Number)) == -1 { + if ast.Compare(compareTime, v) == -1 { return iter(unverified) } default: - return fmt.Errorf("nbf value must be a number") + return errors.New("nbf value must be a number") } } verified := ast.ArrayTerm( - ast.BooleanTerm(true), - ast.NewTerm(token.decodedHeader), + ast.InternedBooleanTerm(true), + ast.NewTerm(header), ast.NewTerm(payload), ) return iter(verified) @@ -1226,7 +1264,63 @@ func getInputSHA(input []byte, h func() hash.Hash) []byte { return hasher.Sum(nil) } +type jwtCacheEntry struct { + payload ast.Object + header ast.Object + validSignature bool +} + +const tokenCacheName = "io_jwt" + +func getTokenFromCache(bctx BuiltinContext, serializedJwt ast.Value, publicKey ast.Value) (bool, ast.Object, ast.Object, bool) { + if bctx.InterQueryBuiltinValueCache == nil { + return false, nil, nil, false + } + + c := bctx.InterQueryBuiltinValueCache.GetCache(tokenCacheName) + if c == nil { + return false, nil, nil, false + } + + key := createTokenCacheKey(serializedJwt, publicKey) + + entry, ok := c.Get(key) + if !ok { + return false, nil, nil, false + } + + if jwtEntry, ok := entry.(jwtCacheEntry); ok { + return true, jwtEntry.header, jwtEntry.payload, jwtEntry.validSignature + } + + return false, nil, nil, false +} + +func putTokenInCache(bctx BuiltinContext, serializedJwt ast.Value, publicKey ast.Value, header ast.Object, payload ast.Object, validSignature bool) { + if bctx.InterQueryBuiltinValueCache == nil { + return + } + + c := bctx.InterQueryBuiltinValueCache.GetCache(tokenCacheName) + if c == nil { + return + } + + key := createTokenCacheKey(serializedJwt, publicKey) + + c.Insert(key, jwtCacheEntry{header: header, payload: payload, validSignature: validSignature}) +} + +func createTokenCacheKey(serializedJwt ast.Value, publicKey ast.Value) ast.Value { + // We need to create a key that is unique to the serialized JWT (for lookup) and the public key used to verify it, + // so that we don't get a misleading cached validation result for a different, invalid key. + return ast.NewArray(ast.NewTerm(serializedJwt), ast.NewTerm(publicKey)) +} + func init() { + // By default, the JWT cache is disabled. 
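// A minimal sketch of why createTokenCacheKey above pairs the serialized
// token with the key material: caching on the token alone could replay a
// "valid" verdict that was computed under a different key. A plain map
// stands in for OPA's InterQueryBuiltinValueCache in this illustration.

package main

import "fmt"

type cacheKey struct{ jwt, key string }

var verifyCache = map[cacheKey]bool{}

func cachedVerify(jwt, key string, verify func(jwt, key string) bool) bool {
	k := cacheKey{jwt: jwt, key: key}
	if valid, ok := verifyCache[k]; ok {
		return valid // hit only for the same token *and* the same key
	}
	valid := verify(jwt, key)
	verifyCache[k] = valid
	return valid
}

func main() {
	verify := func(_, key string) bool { return key == "right-key" }
	fmt.Println(cachedVerify("tok", "right-key", verify)) // true (computed)
	fmt.Println(cachedVerify("tok", "wrong-key", verify)) // false, not a stale hit
}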
+ cache.RegisterDefaultInterQueryBuiltinValueCacheConfig(tokenCacheName, nil) + RegisterBuiltinFunc(ast.JWTDecode.Name, builtinJWTDecode) RegisterBuiltinFunc(ast.JWTVerifyRS256.Name, builtinJWTVerifyRS256) RegisterBuiltinFunc(ast.JWTVerifyRS384.Name, builtinJWTVerifyRS384) diff --git a/vendor/github.com/open-policy-agent/opa/topdown/trace.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/trace.go similarity index 98% rename from vendor/github.com/open-policy-agent/opa/topdown/trace.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/trace.go index 277c94b626c..070e254d280 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/trace.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/trace.go @@ -13,8 +13,8 @@ import ( iStrs "github.com/open-policy-agent/opa/internal/strings" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) const ( @@ -199,7 +199,7 @@ func (l *legacyTracer) Enabled() bool { return l.t.Enabled() } -func (l *legacyTracer) Config() TraceConfig { +func (*legacyTracer) Config() TraceConfig { return TraceConfig{ PlugLocalVars: true, // For backwards compatibility old tracers will plug local variables } @@ -241,7 +241,7 @@ func (b *BufferTracer) TraceEvent(evt Event) { } // Config returns the Tracers standard configuration -func (b *BufferTracer) Config() TraceConfig { +func (*BufferTracer) Config() TraceConfig { return TraceConfig{PlugLocalVars: true} } @@ -373,10 +373,6 @@ func exprLocalVars(e *Event) *ast.ValueMap { vars := ast.NewValueMap() findVars := func(term *ast.Term) bool { - //if r, ok := term.Value.(ast.Ref); ok { - // fmt.Printf("ref: %v\n", r) - // //return true - //} if name, ok := term.Value.(ast.Var); ok { if meta, ok := e.LocalMetadata[name]; ok { if val := e.Locals.Get(name); val != nil { @@ -537,7 +533,7 @@ func builtinTrace(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term } if !bctx.TraceEnabled { - return iter(ast.BooleanTerm(true)) + return iter(ast.InternedBooleanTerm(true)) } evt := Event{ @@ -552,7 +548,7 @@ func builtinTrace(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term bctx.QueryTracers[i].TraceEvent(evt) } - return iter(ast.BooleanTerm(true)) + return iter(ast.InternedBooleanTerm(true)) } func rewrite(event *Event) *Event { @@ -867,7 +863,7 @@ func printArrows(w *bytes.Buffer, l []varInfo, printValueAt int) { spaces = (col - prevCol) - 1 } - for j := 0; j < spaces; j++ { + for j := range spaces { tab := false for _, t := range info.exprLoc.Tabs { if t == j+prevCol+1 { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/type.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/type.go similarity index 71% rename from vendor/github.com/open-policy-agent/opa/topdown/type.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/type.go index dab5c853cd9..6103fbe4845 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/type.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/type.go @@ -5,69 +5,69 @@ package topdown import ( - "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/v1/ast" ) func builtinIsNumber(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { switch operands[0].Value.(type) { case ast.Number: - return iter(ast.BooleanTerm(true)) + return iter(ast.InternedBooleanTerm(true)) default: - return iter(ast.BooleanTerm(false)) + return 
iter(ast.InternedBooleanTerm(false)) } } func builtinIsString(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { switch operands[0].Value.(type) { case ast.String: - return iter(ast.BooleanTerm(true)) + return iter(ast.InternedBooleanTerm(true)) default: - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } } func builtinIsBoolean(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { switch operands[0].Value.(type) { case ast.Boolean: - return iter(ast.BooleanTerm(true)) + return iter(ast.InternedBooleanTerm(true)) default: - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } } func builtinIsArray(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { switch operands[0].Value.(type) { case *ast.Array: - return iter(ast.BooleanTerm(true)) + return iter(ast.InternedBooleanTerm(true)) default: - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } } func builtinIsSet(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { switch operands[0].Value.(type) { case ast.Set: - return iter(ast.BooleanTerm(true)) + return iter(ast.InternedBooleanTerm(true)) default: - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } } func builtinIsObject(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { switch operands[0].Value.(type) { case ast.Object: - return iter(ast.BooleanTerm(true)) + return iter(ast.InternedBooleanTerm(true)) default: - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } } func builtinIsNull(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { switch operands[0].Value.(type) { case ast.Null: - return iter(ast.BooleanTerm(true)) + return iter(ast.InternedBooleanTerm(true)) default: - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } } diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/type_name.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/type_name.go new file mode 100644 index 00000000000..a611e8f30eb --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/type_name.go @@ -0,0 +1,46 @@ +// Copyright 2018 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package topdown + +import ( + "errors" + + "github.com/open-policy-agent/opa/v1/ast" +) + +var ( + nullStringTerm = ast.StringTerm("null") + booleanStringTerm = ast.StringTerm("boolean") + numberStringTerm = ast.StringTerm("number") + stringStringTerm = ast.StringTerm("string") + arrayStringTerm = ast.StringTerm("array") + objectStringTerm = ast.StringTerm("object") + setStringTerm = ast.StringTerm("set") +) + +func builtinTypeName(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + switch operands[0].Value.(type) { + case ast.Null: + return iter(nullStringTerm) + case ast.Boolean: + return iter(booleanStringTerm) + case ast.Number: + return iter(numberStringTerm) + case ast.String: + return iter(stringStringTerm) + case *ast.Array: + return iter(arrayStringTerm) + case ast.Object: + return iter(objectStringTerm) + case ast.Set: + return iter(setStringTerm) + } + + return errors.New("illegal value") +} + +func init() { + RegisterBuiltinFunc(ast.TypeNameBuiltin.Name, builtinTypeName) +} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/uuid.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/uuid.go similarity index 92% rename from vendor/github.com/open-policy-agent/opa/topdown/uuid.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/uuid.go index d3a7a5f900c..d013df9feab 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/uuid.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/uuid.go @@ -5,9 +5,9 @@ package topdown import ( - "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/uuid" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) type uuidCachingKey string diff --git a/vendor/github.com/open-policy-agent/opa/topdown/walk.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/walk.go similarity index 61% rename from vendor/github.com/open-policy-agent/opa/topdown/walk.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/walk.go index 0f3b3544b52..43aa29c97a8 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/walk.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/walk.go @@ -5,18 +5,20 @@ package topdown import ( - "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/v1/ast" ) +var emptyArr = ast.ArrayTerm() + func evalWalk(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { input := operands[0] if pathIsWildcard(operands) { // When the path assignment is a wildcard: walk(input, [_, value]) // we may skip the path construction entirely, and simply return - // same pointer in each iteration. This is a much more efficient + // same pointer in each iteration. This is a *much* more efficient // path when only the values are needed. 
- return walkNoPath(input, iter) + return walkNoPath(ast.ArrayTerm(emptyArr, input), iter) } filter := getOutputPath(operands) @@ -24,7 +26,6 @@ func evalWalk(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error } func walk(filter, path *ast.Array, input *ast.Term, iter func(*ast.Term) error) error { - if filter == nil || filter.Len() == 0 { if path == nil { path = ast.NewArray() @@ -49,58 +50,63 @@ func walk(filter, path *ast.Array, input *ast.Term, iter func(*ast.Term) error) switch v := input.Value.(type) { case *ast.Array: - for i := 0; i < v.Len(); i++ { - path = pathAppend(path, ast.IntNumberTerm(i)) - if err := walk(filter, path, v.Elem(i), iter); err != nil { + for i := range v.Len() { + if err := walk(filter, pathAppend(path, ast.InternedIntNumberTerm(i)), v.Elem(i), iter); err != nil { return err } - path = path.Slice(0, path.Len()-1) } case ast.Object: - return v.Iter(func(k, v *ast.Term) error { - path = pathAppend(path, k) - if err := walk(filter, path, v, iter); err != nil { + for _, k := range v.Keys() { + if err := walk(filter, pathAppend(path, k), v.Get(k), iter); err != nil { return err } - path = path.Slice(0, path.Len()-1) - return nil - }) + } case ast.Set: - return v.Iter(func(elem *ast.Term) error { - path = pathAppend(path, elem) - if err := walk(filter, path, elem, iter); err != nil { + for _, elem := range v.Slice() { + if err := walk(filter, pathAppend(path, elem), elem, iter); err != nil { return err } - path = path.Slice(0, path.Len()-1) - return nil - }) + } } return nil } -var emptyArr = ast.ArrayTerm() - func walkNoPath(input *ast.Term, iter func(*ast.Term) error) error { - if err := iter(ast.ArrayTerm(emptyArr, input)); err != nil { + // Note: the path array is embedded in the input from the start here + // in order to avoid an extra allocation per iteration. This leads to + // a little convoluted code below in order to extract and set the value, + // but since walk is commonly used to traverse large data structures, + // the performance gain is worth it. 
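The comment ending above is the heart of this rewrite: evalWalk now allocates the [path, value] result array once, and walkNoPath overwrites slot 1 in place (inputArray.Set(1, ...)) before every callback instead of building a fresh pair per visited node. The same trick in miniature, detached from the ast package and using plain interface{} trees (a hypothetical helper; note the caveat that a callback retaining the pair past its invocation must copy it first):

    // walkValues visits every node of a nested []interface{} tree, reusing
    // one two-slot pair for the whole traversal: slot 0 stands in for the
    // (unused) path and slot 1 is overwritten with the current value.
    func walkValues(root interface{}, iter func(pair []interface{}) error) error {
        pair := []interface{}{nil, root}
        return walkInto(pair, iter)
    }

    func walkInto(pair []interface{}, iter func([]interface{}) error) error {
        if err := iter(pair); err != nil {
            return err
        }
        kids, ok := pair[1].([]interface{})
        if !ok {
            return nil
        }
        for _, k := range kids {
            pair[1] = k // mutate in place: no per-node allocation
            if err := walkInto(pair, iter); err != nil {
                return err
            }
        }
        return nil
    }

As the original comment concedes, the price is readability: the current value has to be fished back out of the array at each level (inputArray.Get/Set in the real code), and callers see a mutable pair rather than a fresh one.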
+ if err := iter(input); err != nil { return err } - switch v := input.Value.(type) { + inputArray := input.Value.(*ast.Array) + value := inputArray.Get(ast.InternedIntNumberTerm(1)).Value + + switch v := value.(type) { case ast.Object: - return v.Iter(func(_, v *ast.Term) error { - return walkNoPath(v, iter) - }) + for _, k := range v.Keys() { + inputArray.Set(1, v.Get(k)) + if err := walkNoPath(input, iter); err != nil { + return err + } + } case *ast.Array: - for i := 0; i < v.Len(); i++ { - if err := walkNoPath(v.Elem(i), iter); err != nil { + for i := range v.Len() { + inputArray.Set(1, v.Elem(i)) + if err := walkNoPath(input, iter); err != nil { return err } } case ast.Set: - return v.Iter(func(elem *ast.Term) error { - return walkNoPath(elem, iter) - }) + for _, elem := range v.Slice() { + inputArray.Set(1, elem) + if err := walkNoPath(input, iter); err != nil { + return err + } + } } return nil diff --git a/vendor/github.com/open-policy-agent/opa/tracing/tracing.go b/vendor/github.com/open-policy-agent/opa/v1/tracing/tracing.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/tracing/tracing.go rename to vendor/github.com/open-policy-agent/opa/v1/tracing/tracing.go diff --git a/vendor/github.com/open-policy-agent/opa/v1/types/decode.go b/vendor/github.com/open-policy-agent/opa/v1/types/decode.go new file mode 100644 index 00000000000..e3e1e983706 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/types/decode.go @@ -0,0 +1,191 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package types + +import ( + "encoding/json" + "fmt" + + "github.com/open-policy-agent/opa/v1/util" +) + +const ( + typeNull = "null" + typeBoolean = "boolean" + typeNumber = "number" + typeString = "string" + typeArray = "array" + typeSet = "set" + typeObject = "object" + typeAny = "any" + typeFunction = "function" +) + +// Unmarshal deserializes bs and returns the resulting type. +func Unmarshal(bs []byte) (result Type, err error) { + + var hint rawtype + + if err = util.UnmarshalJSON(bs, &hint); err == nil { + switch hint.Type { + case typeNull: + result = Nl + case typeBoolean: + result = B + case typeNumber: + result = N + case typeString: + result = S + case typeArray: + var arr rawarray + if err = util.UnmarshalJSON(bs, &arr); err == nil { + var err error + var static []Type + var dynamic Type + if static, err = unmarshalSlice(arr.Static); err != nil { + return nil, err + } + if len(arr.Dynamic) != 0 { + if dynamic, err = Unmarshal(arr.Dynamic); err != nil { + return nil, err + } + } + result = NewArray(static, dynamic) + } + case typeObject: + var obj rawobject + if err = util.UnmarshalJSON(bs, &obj); err == nil { + var err error + var static []*StaticProperty + var dynamic *DynamicProperty + if static, err = unmarshalStaticPropertySlice(obj.Static); err != nil { + return nil, err + } + if dynamic, err = unmarshalDynamicProperty(obj.Dynamic); err != nil { + return nil, err + } + result = NewObject(static, dynamic) + } + case typeSet: + var set rawset + if err = util.UnmarshalJSON(bs, &set); err == nil { + var of Type + if of, err = Unmarshal(set.Of); err == nil { + result = NewSet(of) + } + } + case typeAny: + var union rawunion + if err = util.UnmarshalJSON(bs, &union); err == nil { + var of []Type + if of, err = unmarshalSlice(union.Of); err == nil { + result = NewAny(of...) 
+ } + } + case typeFunction: + var decl rawdecl + if err = util.UnmarshalJSON(bs, &decl); err == nil { + args, err := unmarshalSlice(decl.Args) + if err != nil { + return nil, err + } + var ret Type + if len(decl.Result) > 0 { + ret, err = Unmarshal(decl.Result) + if err != nil { + return nil, err + } + } + if len(decl.Variadic) > 0 { + varargs, err := Unmarshal(decl.Variadic) + if err != nil { + return nil, err + } + result = NewVariadicFunction(args, varargs, ret) + } else { + result = NewFunction(args, ret) + } + } + default: + err = fmt.Errorf("unsupported type '%v'", hint.Type) + } + } + + return result, err +} + +type rawtype struct { + Type string `json:"type"` +} + +type rawarray struct { + Static []json.RawMessage `json:"static"` + Dynamic json.RawMessage `json:"dynamic"` +} + +type rawobject struct { + Static []rawstaticproperty `json:"static"` + Dynamic rawdynamicproperty `json:"dynamic"` +} + +type rawstaticproperty struct { + Key interface{} `json:"key"` + Value json.RawMessage `json:"value"` +} + +type rawdynamicproperty struct { + Key json.RawMessage `json:"key"` + Value json.RawMessage `json:"value"` +} + +type rawset struct { + Of json.RawMessage `json:"of"` +} + +type rawunion struct { + Of []json.RawMessage `json:"of"` +} + +type rawdecl struct { + Args []json.RawMessage `json:"args"` + Result json.RawMessage `json:"result"` + Variadic json.RawMessage `json:"variadic"` +} + +func unmarshalSlice(elems []json.RawMessage) (result []Type, err error) { + result = make([]Type, len(elems)) + for i := range elems { + if result[i], err = Unmarshal(elems[i]); err != nil { + return nil, err + } + } + return result, err +} + +func unmarshalStaticPropertySlice(elems []rawstaticproperty) (result []*StaticProperty, err error) { + result = make([]*StaticProperty, len(elems)) + for i := range elems { + value, err := Unmarshal(elems[i].Value) + if err != nil { + return nil, err + } + result[i] = NewStaticProperty(elems[i].Key, value) + } + return result, err +} + +func unmarshalDynamicProperty(x rawdynamicproperty) (result *DynamicProperty, err error) { + if len(x.Key) == 0 { + return nil, nil + } + var key Type + if key, err = Unmarshal(x.Key); err == nil { + var value Type + if value, err = Unmarshal(x.Value); err == nil { + return NewDynamicProperty(key, value), nil + } + } + return nil, err +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/types/types.go b/vendor/github.com/open-policy-agent/opa/v1/types/types.go new file mode 100644 index 00000000000..c661e96666e --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/types/types.go @@ -0,0 +1,1222 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package types declares data types for Rego values and helper functions to +// operate on these types. +package types + +import ( + "encoding/json" + "errors" + "fmt" + "slices" + "sort" + "strings" + + "github.com/open-policy-agent/opa/v1/util" +) + +// Sprint returns the string representation of the type. +func Sprint(x Type) string { + if x == nil { + return "???" + } + return x.String() +} + +// Type represents a type of a term in the language. 
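Unmarshal above is a two-pass, discriminator-based decoder: pass one reads only the "type" field (rawtype), and pass two re-decodes the same bytes into a shape-specific raw struct whose nested fields stay json.RawMessage so the function can recurse on them. A self-contained sketch of the scheme, with hypothetical shapes standing in for Rego types:

    package main

    import (
        "encoding/json"
        "fmt"
        "math"
    )

    type Shape interface{ Area() float64 }

    type Circle struct {
        Radius float64 `json:"radius"`
    }

    func (c Circle) Area() float64 { return math.Pi * c.Radius * c.Radius }

    type Box struct {
        W, H float64
    }

    func (b Box) Area() float64 { return b.W * b.H }

    // UnmarshalShape peeks at the discriminator first, then decodes the
    // full payload a second time into the concrete shape.
    func UnmarshalShape(bs []byte) (Shape, error) {
        var hint struct {
            Type string `json:"type"`
        }
        if err := json.Unmarshal(bs, &hint); err != nil {
            return nil, err
        }
        switch hint.Type {
        case "circle":
            var c Circle
            if err := json.Unmarshal(bs, &c); err != nil {
                return nil, err
            }
            return c, nil
        case "box":
            var b Box
            if err := json.Unmarshal(bs, &b); err != nil {
                return nil, err
            }
            return b, nil
        }
        return nil, fmt.Errorf("unsupported type '%v'", hint.Type)
    }

    func main() {
        s, err := UnmarshalShape([]byte(`{"type": "circle", "radius": 2}`))
        fmt.Println(s, err) // {2} <nil>
    }

Keeping nested payloads as json.RawMessage is what lets the real Unmarshal handle arbitrarily nested types (arrays of sets of objects, and so on) through a single recursive entry point.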
+type Type interface { + String() string + typeMarker() string + json.Marshaler +} + +func (Null) typeMarker() string { return typeNull } +func (Boolean) typeMarker() string { return typeBoolean } +func (Number) typeMarker() string { return typeNumber } +func (String) typeMarker() string { return typeString } +func (*Array) typeMarker() string { return typeArray } +func (*Object) typeMarker() string { return typeObject } +func (*Set) typeMarker() string { return typeSet } +func (Any) typeMarker() string { return typeAny } +func (Function) typeMarker() string { return typeFunction } + +// Null represents the null type. +type Null struct{} + +// NewNull returns a new Null type. +func NewNull() Null { + return Null{} +} + +var Nl Type = NewNull() + +// NamedType represents a type alias with an arbitrary name and description. +// This is useful for generating documentation for built-in functions. +type NamedType struct { + Name, Descr string + Type Type +} + +func (n *NamedType) typeMarker() string { return n.Type.typeMarker() } +func (n *NamedType) String() string { return n.Name + ": " + n.Type.String() } +func (n *NamedType) MarshalJSON() ([]byte, error) { + var obj map[string]interface{} + switch x := n.Type.(type) { + case interface{ toMap() map[string]interface{} }: + obj = x.toMap() + default: + obj = map[string]interface{}{ + "type": n.Type.typeMarker(), + } + } + obj["name"] = n.Name + if n.Descr != "" { + obj["description"] = n.Descr + } + return json.Marshal(obj) +} + +func (n *NamedType) Description(d string) *NamedType { + n.Descr = d + return n +} + +// Named returns the passed type as a named type. +// Named types are only valid at the top level of built-in functions. +// Note that nested named types cause panic. +func Named(name string, t Type) *NamedType { + return &NamedType{ + Type: t, + Name: name, + } +} + +// MarshalJSON returns the JSON encoding of t. +func (t Null) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "type": t.typeMarker(), + }) +} + +func unwrap(t Type) Type { + switch t := t.(type) { + case *NamedType: + return t.Type + default: + return t + } +} + +func (Null) String() string { + return typeNull +} + +// Boolean represents the boolean type. +type Boolean struct{} + +// B represents an instance of the boolean type. +var B Type = NewBoolean() + +// NewBoolean returns a new Boolean type. +func NewBoolean() Boolean { + return Boolean{} +} + +// MarshalJSON returns the JSON encoding of t. +func (t Boolean) MarshalJSON() ([]byte, error) { + repr := map[string]interface{}{ + "type": t.typeMarker(), + } + return json.Marshal(repr) +} + +func (t Boolean) String() string { + return t.typeMarker() +} + +// String represents the string type. +type String struct{} + +// S represents an instance of the string type. +var S Type = NewString() + +// NewString returns a new String type. +func NewString() String { + return String{} +} + +// MarshalJSON returns the JSON encoding of t. +func (t String) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "type": t.typeMarker(), + }) +} + +func (String) String() string { + return typeString +} + +// Number represents the number type. +type Number struct{} + +// N represents an instance of the number type. +var N Type = NewNumber() + +// NewNumber returns a new Number type. +func NewNumber() Number { + return Number{} +} + +// MarshalJSON returns the JSON encoding of t. 
+func (t Number) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "type": t.typeMarker(), + }) +} + +func (Number) String() string { + return typeNumber +} + +// Array represents the array type. +type Array struct { + static []Type // static items + dynamic Type // dynamic items +} + +// NewArray returns a new Array type. +func NewArray(static []Type, dynamic Type) *Array { + return &Array{ + static: static, + dynamic: dynamic, + } +} + +// MarshalJSON returns the JSON encoding of t. +func (t *Array) MarshalJSON() ([]byte, error) { + return json.Marshal(t.toMap()) +} + +func (t *Array) toMap() map[string]interface{} { + repr := map[string]interface{}{ + "type": t.typeMarker(), + } + if len(t.static) != 0 { + repr["static"] = t.static + } + if t.dynamic != nil { + repr["dynamic"] = t.dynamic + } + return repr +} + +func (t *Array) String() string { + prefix := "array" + buf := []string{} + for _, tpe := range t.static { + buf = append(buf, Sprint(tpe)) + } + repr := prefix + if len(buf) > 0 { + repr += "<" + strings.Join(buf, ", ") + ">" + } + if t.dynamic != nil { + repr += "[" + t.dynamic.String() + "]" + } + return repr +} + +// Dynamic returns the type of the array's dynamic elements. +func (t *Array) Dynamic() Type { + return t.dynamic +} + +// Len returns the number of static array elements. +func (t *Array) Len() int { + return len(t.static) +} + +// Select returns the type of element at the zero-based pos. +func (t *Array) Select(pos int) Type { + if pos >= 0 { + if len(t.static) > pos { + return t.static[pos] + } + if t.dynamic != nil { + return t.dynamic + } + } + return nil +} + +// Set represents the set type. +type Set struct { + of Type +} + +// Boxed set types. +var ( + SetOfAny Type = NewSet(A) + SetOfStr Type = NewSet(S) + SetOfNum Type = NewSet(N) +) + +// NewSet returns a new Set type. +func NewSet(of Type) *Set { + return &Set{ + of: of, + } +} + +func (t *Set) Of() Type { + return t.of +} + +// MarshalJSON returns the JSON encoding of t. +func (t *Set) MarshalJSON() ([]byte, error) { + return json.Marshal(t.toMap()) +} + +func (t *Set) toMap() map[string]interface{} { + repr := map[string]interface{}{ + "type": t.typeMarker(), + } + if t.of != nil { + repr["of"] = t.of + } + return repr +} + +func (t *Set) String() string { + prefix := typeSet + return prefix + "[" + Sprint(t.of) + "]" +} + +// StaticProperty represents a static object property. +type StaticProperty struct { + Key interface{} + Value Type +} + +// NewStaticProperty returns a new StaticProperty object. +func NewStaticProperty(key interface{}, value Type) *StaticProperty { + return &StaticProperty{ + Key: key, + Value: value, + } +} + +// MarshalJSON returns the JSON encoding of p. +func (p *StaticProperty) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "key": p.Key, + "value": p.Value, + }) +} + +// DynamicProperty represents a dynamic object property. +type DynamicProperty struct { + Key Type + Value Type +} + +// NewDynamicProperty returns a new DynamicProperty object. +func NewDynamicProperty(key, value Type) *DynamicProperty { + return &DynamicProperty{ + Key: key, + Value: value, + } +} + +// MarshalJSON returns the JSON encoding of p. +func (p *DynamicProperty) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "key": p.Key, + "value": p.Value, + }) +} + +func (p *DynamicProperty) String() string { + return fmt.Sprintf("%s: %s", Sprint(p.Key), Sprint(p.Value)) +} + +// Object represents the object type. 
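The Array type above models a tuple with an optional rest type: positions covered by static carry their declared types, and any later index falls back to dynamic. A usage sketch inferred from Select and String above, not a test taken from the OPA repo:

    package main

    import (
        "fmt"

        "github.com/open-policy-agent/opa/v1/types"
    )

    func main() {
        // arr models array<string, number>[boolean]: indexes 0 and 1 have
        // fixed types; every later index has the dynamic element type.
        arr := types.NewArray([]types.Type{types.S, types.N}, types.B)

        fmt.Println(arr)            // array<string, number>[boolean]
        fmt.Println(arr.Select(0))  // string
        fmt.Println(arr.Select(1))  // number
        fmt.Println(arr.Select(7))  // boolean
        fmt.Println(arr.Select(-1)) // <nil> (negative positions have no type)
    }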
+type Object struct { + static []*StaticProperty // constant properties + dynamic *DynamicProperty // dynamic properties +} + +// NewObject returns a new Object type. +func NewObject(static []*StaticProperty, dynamic *DynamicProperty) *Object { + slices.SortFunc(static, func(a, b *StaticProperty) int { + return util.Compare(a.Key, b.Key) + }) + return &Object{ + static: static, + dynamic: dynamic, + } +} + +func (t *Object) String() string { + prefix := "object" + buf := make([]string, 0, len(t.static)) + for _, p := range t.static { + buf = append(buf, fmt.Sprintf("%v: %v", p.Key, Sprint(p.Value))) + } + repr := prefix + if len(buf) > 0 { + repr += "<" + strings.Join(buf, ", ") + ">" + } + if t.dynamic != nil { + repr += "[" + t.dynamic.String() + "]" + } + return repr +} + +// DynamicValue returns the type of the object's dynamic elements. +func (t *Object) DynamicValue() Type { + if t.dynamic == nil { + return nil + } + return t.dynamic.Value +} + +// DynamicProperties returns the type of the object's dynamic elements. +func (t *Object) DynamicProperties() *DynamicProperty { + return t.dynamic +} + +// StaticProperties returns the type of the object's static elements. +func (t *Object) StaticProperties() []*StaticProperty { + return t.static +} + +// Keys returns the keys of the object's static elements. +func (t *Object) Keys() []interface{} { + sl := make([]interface{}, 0, len(t.static)) + for _, p := range t.static { + sl = append(sl, p.Key) + } + return sl +} + +// MarshalJSON returns the JSON encoding of t. +func (t *Object) MarshalJSON() ([]byte, error) { + return json.Marshal(t.toMap()) +} + +func (t *Object) toMap() map[string]interface{} { + repr := map[string]interface{}{ + "type": t.typeMarker(), + } + if len(t.static) != 0 { + repr["static"] = t.static + } + if t.dynamic != nil { + repr["dynamic"] = t.dynamic + } + return repr +} + +// Select returns the type of the named property. 
+func (t *Object) Select(name interface{}) Type { + pos := sort.Search(len(t.static), func(x int) bool { + return util.Compare(t.static[x].Key, name) >= 0 + }) + + if pos < len(t.static) && util.Compare(t.static[pos].Key, name) == 0 { + return t.static[pos].Value + } + + if t.dynamic != nil { + if Contains(t.dynamic.Key, TypeOf(name)) { + return t.dynamic.Value + } + } + + return nil +} + +func (t *Object) Merge(other Type) *Object { + if otherObj, ok := other.(*Object); ok { + return mergeObjects(t, otherObj) + } + + var typeK Type + var typeV Type + dynProps := t.DynamicProperties() + if dynProps != nil { + typeK = Or(Keys(other), dynProps.Key) + typeV = Or(Values(other), dynProps.Value) + dynProps = NewDynamicProperty(typeK, typeV) + } else { + typeK = Keys(other) + typeV = Values(other) + if typeK != nil && typeV != nil { + dynProps = NewDynamicProperty(typeK, typeV) + } + } + + return NewObject(t.StaticProperties(), dynProps) +} + +func mergeObjects(a, b *Object) *Object { + var dynamicProps *DynamicProperty + if a.dynamic != nil && b.dynamic != nil { + typeK := Or(a.dynamic.Key, b.dynamic.Key) + var typeV Type + aObj, aIsObj := a.dynamic.Value.(*Object) + bObj, bIsObj := b.dynamic.Value.(*Object) + if aIsObj && bIsObj { + typeV = mergeObjects(aObj, bObj) + } else { + typeV = Or(a.dynamic.Value, b.dynamic.Value) + } + dynamicProps = NewDynamicProperty(typeK, typeV) + } else if a.dynamic != nil { + dynamicProps = a.dynamic + } else { + dynamicProps = b.dynamic + } + + staticPropsMap := make(map[interface{}]Type) + + for _, sp := range a.static { + staticPropsMap[sp.Key] = sp.Value + } + + for _, sp := range b.static { + currV := staticPropsMap[sp.Key] + if currV != nil { + currVObj, currVIsObj := currV.(*Object) + spVObj, spVIsObj := sp.Value.(*Object) + if currVIsObj && spVIsObj { + staticPropsMap[sp.Key] = mergeObjects(currVObj, spVObj) + } else { + staticPropsMap[sp.Key] = Or(currV, sp.Value) + } + } else { + staticPropsMap[sp.Key] = sp.Value + } + } + + staticProps := make([]*StaticProperty, 0, len(staticPropsMap)) + for k, v := range staticPropsMap { + staticProps = append(staticProps, NewStaticProperty(k, v)) + } + + return NewObject(staticProps, dynamicProps) +} + +// Any represents a dynamic type. +type Any []Type + +// A represents the superset of all types. +var A Type = NewAny() + +// NewAny returns a new Any type. +func NewAny(of ...Type) Any { + sl := make(Any, len(of)) + copy(sl, of) + sort.Sort(typeSlice(sl)) + return sl +} + +// Contains returns true if t is a superset of other. +func (t Any) Contains(other Type) bool { + if _, ok := other.(*Function); ok { + return false + } + // Note(philipc): We used to do this as a linear search. + // Since this is always sorted, we can use a binary search instead. + i := sort.Search(len(t), func(i int) bool { + return Compare(t[i], other) >= 0 + }) + if i < len(t) && Compare(t[i], other) == 0 { + // x is present at t[i] + return true + } + return len(t) == 0 +} + +// MarshalJSON returns the JSON encoding of t. +func (t Any) MarshalJSON() ([]byte, error) { + return json.Marshal(t.toMap()) +} + +func (t Any) toMap() map[string]interface{} { + repr := map[string]interface{}{ + "type": t.typeMarker(), + } + if len(t) != 0 { + repr["of"] = []Type(t) + } + return repr +} + +// Merge return a new Any type that is the superset of t and other. 
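As the Note(philipc) comment in Contains above explains, Any values keep a sorted-slice invariant (established by sort.Sort in NewAny) precisely so that membership checks and merges can use binary search. The underlying sort.Search idioms in isolation, over plain ints:

    package main

    import (
        "fmt"
        "sort"
    )

    // contains mirrors types.Any.Contains: binary-search for the first
    // element >= x, then check for an exact hit.
    func contains(s []int, x int) bool {
        i := sort.Search(len(s), func(i int) bool { return s[i] >= x })
        return i < len(s) && s[i] == x
    }

    // insert mirrors types.Any.Merge: splice x in at its sorted position
    // so the invariant survives the merge.
    func insert(s []int, x int) []int {
        i := sort.Search(len(s), func(i int) bool { return s[i] >= x })
        out := make([]int, len(s)+1)
        copy(out, s[:i])
        out[i] = x
        copy(out[i+1:], s[i:])
        return out
    }

    func main() {
        s := []int{1, 3, 5}
        fmt.Println(contains(s, 3), contains(s, 4)) // true false
        fmt.Println(insert(s, 4))                   // [1 3 4 5]
    }

One wrinkle the sketch omits: an empty Any is the universal type, so the real Contains also answers true for any non-function type when len(t) == 0.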
+func (t Any) Merge(other Type) Any { + if otherAny, ok := other.(Any); ok { + return t.Union(otherAny) + } + if t.Contains(other) { + return t + } + cpy := make(Any, len(t)+1) + idx := sort.Search(len(t), func(i int) bool { + return Compare(t[i], other) >= 0 + }) + copy(cpy, t[:idx]) + cpy[idx] = other + copy(cpy[idx+1:], t[idx:]) + return cpy +} + +// Union returns a new Any type that is the union of the two Any types. +// Note(philipc): The two Any slices MUST be sorted before running Union, +// or else this method will fail to merge the two slices correctly. +func (t Any) Union(other Any) Any { + lenT := len(t) + lenOther := len(other) + // Return the more general (blank) Any type if present. + if lenT == 0 { + return t + } + if lenOther == 0 { + return other + } + // Prealloc the output list. + maxLen := lenT + if lenT < lenOther { + maxLen = lenOther + } + merged := make(Any, 0, maxLen) + // Note(philipc): Create a merged slice, doing the minimum number of + // comparisons along the way. We treat this as a problem of merging two + // sorted lists that might have duplicates. This specifically saves us + // from cases where one list might be *much* longer than the other. + // Algorithm: + // Assume: + // - List A + // - List B + // - List Output + // - Idx_a, Idx_b + // Procedure: + // - While Idx_a < len(A) and Idx_b < len(B) + // - Compare head(A) and head(B) + // - Cases: + // - A < B: Append head(A) to Output, advance Idx_a + // - A == B: Append head(A) to Output, advance Idx_a, Idx_b + // - A > B: Append head(B) to Output, advance Idx_b + // - Return output + idxA := 0 + idxB := 0 + for idxA < lenT || idxB < lenOther { + // Early-exit cases: + if idxA == lenT { + // Ran out of elements in t. Copy over what's left from other. + merged = append(merged, other[idxB:]...) + break + } else if idxB == lenOther { + // Ran out of elements in other. Copy over what's left from t. + merged = append(merged, t[idxA:]...) + break + } + // Normal selection of next element to merge: + switch Compare(t[idxA], other[idxB]) { + // A < B: + case -1: + merged = append(merged, t[idxA]) + idxA++ + // A == B: + case 0: + merged = append(merged, t[idxA]) + idxA++ + idxB++ + // A > B: + case 1: + merged = append(merged, other[idxB]) + idxB++ + } + } + return merged +} + +func (t Any) String() string { + prefix := "any" + if len(t) == 0 { + return prefix + } + buf := make([]string, len(t)) + for i := range t { + buf[i] = Sprint(t[i]) + } + return prefix + "<" + strings.Join(buf, ", ") + ">" +} + +// Function represents a function type. +type Function struct { + args []Type + result Type + variadic Type +} + +// Args returns an argument list. +func Args(x ...Type) []Type { + return x +} + +// Void returns true if the function has no return value. This function returns +// false if x is not a function. +func Void(x Type) bool { + f, ok := x.(*Function) + return ok && f.Result() == nil +} + +// Arity returns the number of arguments in the function signature or zero if x +// is not a function. If the type is unknown, this function returns -1. +func Arity(x Type) int { + if x == nil { + return -1 + } + f, ok := x.(*Function) + if !ok { + return 0 + } + return f.Arity() +} + +// NewFunction returns a new Function object of the given argument and result types. +func NewFunction(args []Type, result Type) *Function { + return &Function{ + args: args, + result: result, + } +} + +// NewVariadicFunction returns a new Function object. This function sets the +// variadic bit on the signature. 
Non-void variadic functions are not currently +// supported. +func NewVariadicFunction(args []Type, varargs Type, result Type) *Function { + if result != nil { + panic("illegal value: non-void variadic functions not supported") + } + return &Function{ + args: args, + variadic: varargs, + result: nil, + } +} + +// FuncArgs returns the function's arguments. +func (t *Function) FuncArgs() FuncArgs { + return FuncArgs{Args: t.Args(), Variadic: unwrap(t.variadic)} +} + +// NamedFuncArgs returns the function's arguments, with a name and +// description if available. +func (t *Function) NamedFuncArgs() FuncArgs { + args := make([]Type, len(t.args)) + copy(args, t.args) + return FuncArgs{Args: args, Variadic: t.variadic} +} + +// Args returns the function's arguments as a slice, ignoring variadic arguments. +// Deprecated: Use FuncArgs instead. +func (t *Function) Args() []Type { + cpy := make([]Type, len(t.args)) + for i := range t.args { + cpy[i] = unwrap(t.args[i]) + } + return cpy +} + +// Arity returns the number of arguments in the function signature. +func (t *Function) Arity() int { + return len(t.args) +} + +// Result returns the function's result type. +func (t *Function) Result() Type { + return unwrap(t.result) +} + +// Result returns the function's result type, without stripping name and description. +func (t *Function) NamedResult() Type { + return t.result +} + +func (t *Function) String() string { + return fmt.Sprintf("%v => %v", t.FuncArgs(), Sprint(t.Result())) +} + +// MarshalJSON returns the JSON encoding of t. +func (t *Function) MarshalJSON() ([]byte, error) { + repr := map[string]interface{}{ + "type": t.typeMarker(), + } + if len(t.args) > 0 { + repr["args"] = t.args + } + if t.result != nil { + repr["result"] = t.result + } + if t.variadic != nil { + repr["variadic"] = t.variadic + } + return json.Marshal(repr) +} + +// UnmarshalJSON decodes the JSON serialized function declaration. +func (t *Function) UnmarshalJSON(bs []byte) error { + tpe, err := Unmarshal(bs) + if err != nil { + return err + } + + f, ok := tpe.(*Function) + if !ok { + return errors.New("invalid type") + } + + *t = *f + return nil +} + +// Union returns a new function representing the union of t and other. Functions +// must have the same arity to be unioned. +func (t *Function) Union(other *Function) *Function { + if other == nil { + return t + } + if t == nil { + return other + } + + if t.Arity() != other.Arity() { + return nil + } + + tfa := t.FuncArgs() + ofa := other.FuncArgs() + + aIsVariadic := tfa.Variadic != nil + bIsVariadic := ofa.Variadic != nil + + if aIsVariadic && !bIsVariadic { + return nil + } else if bIsVariadic && !aIsVariadic { + return nil + } + + a := t.Args() + b := other.Args() + + args := make([]Type, len(a)) + for i := range a { + args[i] = Or(a[i], b[i]) + } + + result := NewFunction(args, Or(t.Result(), other.Result())) + result.variadic = Or(tfa.Variadic, ofa.Variadic) + + return result +} + +// FuncArgs represents the arguments that can be passed to a function. +type FuncArgs struct { + Args []Type `json:"args,omitempty"` + Variadic Type `json:"variadic,omitempty"` +} + +func (a FuncArgs) String() string { + buf := make([]string, 0, len(a.Args)+1) + for i := range a.Args { + buf = append(buf, Sprint(a.Args[i])) + } + if a.Variadic != nil { + buf = append(buf, Sprint(a.Variadic)+"...") + } + return "(" + strings.Join(buf, ", ") + ")" +} + +// Arg returns the nth argument's type. 
+func (a FuncArgs) Arg(x int) Type { + if x < len(a.Args) { + return a.Args[x] + } + return a.Variadic +} + +// Compare returns -1, 0, 1 based on comparison between a and b. +func Compare(a, b Type) int { + a, b = unwrap(a), unwrap(b) + x := typeOrder(a) + y := typeOrder(b) + if x > y { + return 1 + } else if x < y { + return -1 + } + switch a.(type) { //nolint:gocritic + case nil, Null, Boolean, Number, String: + return 0 + case *Array: + arrA := a.(*Array) + arrB := b.(*Array) + if arrA.dynamic != nil && arrB.dynamic == nil { + return 1 + } else if arrB.dynamic != nil && arrA.dynamic == nil { + return -1 + } + if arrB.dynamic != nil && arrA.dynamic != nil { + if cmp := Compare(arrA.dynamic, arrB.dynamic); cmp != 0 { + return cmp + } + } + return typeSliceCompare(arrA.static, arrB.static) + case *Object: + objA := a.(*Object) + objB := b.(*Object) + if objA.dynamic != nil && objB.dynamic == nil { + return 1 + } else if objB.dynamic != nil && objA.dynamic == nil { + return -1 + } + if objA.dynamic != nil && objB.dynamic != nil { + if cmp := Compare(objA.dynamic.Key, objB.dynamic.Key); cmp != 0 { + return cmp + } + if cmp := Compare(objA.dynamic.Value, objB.dynamic.Value); cmp != 0 { + return cmp + } + } + + lenStaticA := len(objA.static) + lenStaticB := len(objB.static) + + minLen := lenStaticA + if lenStaticB < minLen { + minLen = lenStaticB + } + + for i := range minLen { + if cmp := util.Compare(objA.static[i].Key, objB.static[i].Key); cmp != 0 { + return cmp + } + if cmp := Compare(objA.static[i].Value, objB.static[i].Value); cmp != 0 { + return cmp + } + } + + if lenStaticA < lenStaticB { + return -1 + } else if lenStaticB < lenStaticA { + return 1 + } + + return 0 + case *Set: + setA := a.(*Set) + setB := b.(*Set) + if setA.of == nil && setB.of == nil { + return 0 + } else if setA.of == nil { + return -1 + } else if setB.of == nil { + return 1 + } + return Compare(setA.of, setB.of) + case Any: + sl1 := typeSlice(a.(Any)) + sl2 := typeSlice(b.(Any)) + return typeSliceCompare(sl1, sl2) + case *Function: + fA := a.(*Function) + fB := b.(*Function) + if len(fA.args) < len(fB.args) { + return -1 + } else if len(fA.args) > len(fB.args) { + return 1 + } + for i := range len(fA.args) { + if cmp := Compare(fA.args[i], fB.args[i]); cmp != 0 { + return cmp + } + } + if cmp := Compare(fA.result, fB.result); cmp != 0 { + return cmp + } + return Compare(fA.variadic, fB.variadic) + default: + panic("unreachable") + } +} + +// Contains returns true if a is a superset or equal to b. +func Contains(a, b Type) bool { + if x, ok := unwrap(a).(Any); ok { + return x.Contains(b) + } + return Compare(a, b) == 0 +} + +// Or returns a type that represents the union of a and b. If one type is a +// superset of the other, the superset is returned unchanged. +func Or(a, b Type) Type { + a, b = unwrap(a), unwrap(b) + if a == nil { + return b + } else if b == nil { + return a + } + fA, ok1 := a.(*Function) + fB, ok2 := b.(*Function) + if ok1 && ok2 { + return fA.Union(fB) + } else if ok1 || ok2 { + return nil + } + anyA, ok1 := a.(Any) + anyB, ok2 := b.(Any) + if ok1 { + return anyA.Merge(b) + } + if ok2 { + return anyB.Merge(a) + } + if Compare(a, b) == 0 { + return a + } + return NewAny(a, b) +} + +// Select returns a property or item of a. 
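Compare above imposes a total order across all types by first ranking them with typeOrder and only descending into structure when the ranks tie; Or builds unions on top of it, collapsing equal types and delegating to Any.Merge when a union is already involved. A small usage sketch under those semantics:

    package main

    import (
        "fmt"

        "github.com/open-policy-agent/opa/v1/types"
    )

    func main() {
        // Cross-rank comparison: boolean (rank 1) sorts before string (rank 3).
        fmt.Println(types.Compare(types.B, types.S)) // -1

        // A union of two distinct scalars comes back sorted by rank.
        u := types.Or(types.S, types.N)
        fmt.Println(u) // any<number, string>

        // Or with an equal type is a no-op.
        fmt.Println(types.Or(types.S, types.S)) // string

        // The union contains both of its members.
        fmt.Println(types.Contains(u, types.N)) // true
    }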
+func Select(a Type, x interface{}) Type { + switch a := unwrap(a).(type) { + case *Array: + n, ok := x.(json.Number) + if !ok { + return nil + } + pos, err := n.Int64() + if err != nil { + return nil + } + return a.Select(int(pos)) + case *Object: + return a.Select(x) + case *Set: + tpe := TypeOf(x) + if Compare(a.of, tpe) == 0 { + return a.of + } + if x, ok := a.of.(Any); ok { + if x.Contains(tpe) { + return tpe + } + } + return nil + case Any: + if Compare(a, A) == 0 { + return A + } + var tpe Type + for i := range a { + // TODO(tsandall): test nil/nil + tpe = Or(Select(a[i], x), tpe) + } + return tpe + default: + return nil + } +} + +// Keys returns the type of keys that can be enumerated for a. For arrays, the +// keys are always number types, for objects the keys are always string types, +// and for sets the keys are always the type of the set element. +func Keys(a Type) Type { + switch a := unwrap(a).(type) { + case *Array: + return N + case *Object: + var tpe Type + for _, k := range a.Keys() { + tpe = Or(tpe, TypeOf(k)) + } + if a.dynamic != nil { + tpe = Or(tpe, a.dynamic.Key) + } + return tpe + case *Set: + return a.of + case Any: + // TODO(tsandall): ditto test + if Compare(a, A) == 0 { + return A + } + var tpe Type + for i := range a { + tpe = Or(Keys(a[i]), tpe) + } + return tpe + } + return nil +} + +// Values returns the type of values that can be enumerated for a. +func Values(a Type) Type { + switch a := unwrap(a).(type) { + case *Array: + var tpe Type + for i := range a.static { + tpe = Or(tpe, a.static[i]) + } + return Or(tpe, a.dynamic) + case *Object: + var tpe Type + for i := range a.static { + tpe = Or(tpe, a.static[i].Value) + } + if a.dynamic != nil { + tpe = Or(tpe, a.dynamic.Value) + } + return tpe + case *Set: + return a.of + case Any: + if Compare(a, A) == 0 { + return A + } + var tpe Type + for i := range a { + tpe = Or(Values(a[i]), tpe) + } + return tpe + } + return nil +} + +// Nil returns true if a's type is unknown. +func Nil(a Type) bool { + switch a := unwrap(a).(type) { + case nil: + return true + case *Function: + for i := range a.args { + if Nil(a.args[i]) { + return true + } + } + return Nil(a.result) + case *Array: + for i := range a.static { + if Nil(a.static[i]) { + return true + } + } + if a.dynamic != nil { + return Nil(a.dynamic) + } + case *Object: + for i := range a.static { + if Nil(a.static[i].Value) { + return true + } + } + if a.dynamic != nil { + return Nil(a.dynamic.Key) || Nil(a.dynamic.Value) + } + case *Set: + return Nil(a.of) + } + return false +} + +// TypeOf returns the type of the Golang native value. +func TypeOf(x interface{}) Type { + switch x := x.(type) { + case nil: + return Nl + case bool: + return B + case string: + return S + case json.Number: + return N + case map[string]interface{}: + // The ast.ValueToInterface() function returns ast.Object values as map[string]interface{} + // so map[string]interface{} must be handled here because the type checker uses the value + // to interface conversion when inferring object types. 
+ static := make([]*StaticProperty, 0, len(x)) + for k, v := range x { + static = append(static, NewStaticProperty(k, TypeOf(v))) + } + return NewObject(static, nil) + case map[interface{}]interface{}: + static := make([]*StaticProperty, 0, len(x)) + for k, v := range x { + static = append(static, NewStaticProperty(k, TypeOf(v))) + } + return NewObject(static, nil) + case []interface{}: + static := make([]Type, len(x)) + for i := range x { + static[i] = TypeOf(x[i]) + } + return NewArray(static, nil) + } + panic("unreachable") +} + +type typeSlice []Type + +func (s typeSlice) Less(i, j int) bool { return Compare(s[i], s[j]) < 0 } +func (s typeSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s typeSlice) Len() int { return len(s) } + +func typeSliceCompare(a, b []Type) int { + minLen := len(a) + if len(b) < minLen { + minLen = len(b) + } + for i := range minLen { + if cmp := Compare(a[i], b[i]); cmp != 0 { + return cmp + } + } + if len(a) < len(b) { + return -1 + } else if len(b) < len(a) { + return 1 + } + return 0 +} + +func typeOrder(x Type) int { + switch unwrap(x).(type) { + case Null: + return 0 + case Boolean: + return 1 + case Number: + return 2 + case String: + return 3 + case *Array: + return 4 + case *Object: + return 5 + case *Set: + return 6 + case Any: + return 7 + case *Function: + return 8 + case nil: + return -1 + } + panic("unreachable") +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/util/backoff.go b/vendor/github.com/open-policy-agent/opa/v1/util/backoff.go new file mode 100644 index 00000000000..1558f0cff8c --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/util/backoff.go @@ -0,0 +1,42 @@ +// Copyright 2018 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package util + +import ( + "math/rand" + "time" +) + +// DefaultBackoff returns a delay with an exponential backoff based on the +// number of retries. +func DefaultBackoff(base, maxNS float64, retries int) time.Duration { + return Backoff(base, maxNS, .2, 1.6, retries) +} + +// Backoff returns a delay with an exponential backoff based on the number of +// retries. Same algorithm used in gRPC. +func Backoff(base, maxNS, jitter, factor float64, retries int) time.Duration { + if retries == 0 { + return 0 + } + + backoff, maxNS := base, maxNS + for backoff < maxNS && retries > 0 { + backoff *= factor + retries-- + } + if backoff > maxNS { + backoff = maxNS + } + + // Randomize backoff delays so that if a cluster of requests start at + // the same time, they won't operate in lockstep. + backoff *= 1 + jitter*(rand.Float64()*2-1) + if backoff < 0 { + return 0 + } + + return time.Duration(backoff) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/util/channel.go b/vendor/github.com/open-policy-agent/opa/v1/util/channel.go new file mode 100644 index 00000000000..e2653ac7fdb --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/util/channel.go @@ -0,0 +1,32 @@ +package util + +import ( + "github.com/open-policy-agent/opa/v1/metrics" +) + +// This prevents getting blocked forever writing to a full buffer, in case another routine fills the last space. +// Retrying maxEventRetry times to drop the oldest event. Dropping the incoming event if there still isn't room. +const maxEventRetry = 1000 + +// PushFIFO pushes data into a buffered channel without blocking when full, making room by dropping the oldest data. +// An optional metric can be recorded when data is dropped. 
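PushFIFO, whose contract is described just above, trades strict delivery for liveness: when the buffer is full it drops the oldest element rather than block the producer, retrying up to maxEventRetry times. A usage sketch (the buffer size and metric name here are illustrative, not taken from OPA):

    package main

    import (
        "fmt"

        "github.com/open-policy-agent/opa/v1/metrics"
        "github.com/open-policy-agent/opa/v1/util"
    )

    func main() {
        m := metrics.New()
        events := make(chan string, 2)

        // The third push finds the buffer full, drops "a" (counting the
        // drop against the supplied metric), and still succeeds.
        util.PushFIFO(events, "a", m, "dropped_events")
        util.PushFIFO(events, "b", m, "dropped_events")
        util.PushFIFO(events, "c", m, "dropped_events")

        fmt.Println(len(events), <-events, <-events) // 2 b c
    }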
+func PushFIFO[T any](buffer chan T, data T, metrics metrics.Metrics, metricName string) { + + for range maxEventRetry { + // non-blocking send to the buffer, to prevent blocking if buffer is full so room can be made. + select { + case buffer <- data: + return + default: + } + + // non-blocking drop from the buffer to make room for incoming event + select { + case <-buffer: + if metrics != nil && metricName != "" { + metrics.Counter(metricName).Incr() + } + default: + } + } +} diff --git a/vendor/github.com/open-policy-agent/opa/util/close.go b/vendor/github.com/open-policy-agent/opa/v1/util/close.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/util/close.go rename to vendor/github.com/open-policy-agent/opa/v1/util/close.go diff --git a/vendor/github.com/open-policy-agent/opa/util/compare.go b/vendor/github.com/open-policy-agent/opa/v1/util/compare.go similarity index 92% rename from vendor/github.com/open-policy-agent/opa/util/compare.go rename to vendor/github.com/open-policy-agent/opa/v1/util/compare.go index 8ae77536907..2569375b190 100644 --- a/vendor/github.com/open-policy-agent/opa/util/compare.go +++ b/vendor/github.com/open-policy-agent/opa/v1/util/compare.go @@ -8,7 +8,6 @@ import ( "encoding/json" "fmt" "math/big" - "sort" ) // Compare returns 0 if a equals b, -1 if a is less than b, and 1 if b is than a. @@ -83,7 +82,7 @@ func Compare(a, b interface{}) int { if bLen < minLen { minLen = bLen } - for i := 0; i < minLen; i++ { + for i := range minLen { cmp := Compare(a[i], b[i]) if cmp != 0 { return cmp @@ -99,23 +98,15 @@ func Compare(a, b interface{}) int { case map[string]interface{}: switch b := b.(type) { case map[string]interface{}: - var aKeys []string - for k := range a { - aKeys = append(aKeys, k) - } - var bKeys []string - for k := range b { - bKeys = append(bKeys, k) - } - sort.Strings(aKeys) - sort.Strings(bKeys) + aKeys := KeysSorted(a) + bKeys := KeysSorted(b) aLen := len(aKeys) bLen := len(bKeys) minLen := aLen if bLen < minLen { minLen = bLen } - for i := 0; i < minLen; i++ { + for i := range minLen { if aKeys[i] < bKeys[i] { return -1 } else if bKeys[i] < aKeys[i] { diff --git a/vendor/github.com/open-policy-agent/opa/util/decoding/context.go b/vendor/github.com/open-policy-agent/opa/v1/util/decoding/context.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/util/decoding/context.go rename to vendor/github.com/open-policy-agent/opa/v1/util/decoding/context.go diff --git a/vendor/github.com/open-policy-agent/opa/util/doc.go b/vendor/github.com/open-policy-agent/opa/v1/util/doc.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/util/doc.go rename to vendor/github.com/open-policy-agent/opa/v1/util/doc.go diff --git a/vendor/github.com/open-policy-agent/opa/util/enumflag.go b/vendor/github.com/open-policy-agent/opa/v1/util/enumflag.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/util/enumflag.go rename to vendor/github.com/open-policy-agent/opa/v1/util/enumflag.go diff --git a/vendor/github.com/open-policy-agent/opa/util/graph.go b/vendor/github.com/open-policy-agent/opa/v1/util/graph.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/util/graph.go rename to vendor/github.com/open-policy-agent/opa/v1/util/graph.go diff --git a/vendor/github.com/open-policy-agent/opa/v1/util/hashmap.go b/vendor/github.com/open-policy-agent/opa/v1/util/hashmap.go new file mode 100644 index 00000000000..cf6a385f41c --- /dev/null +++ 
b/vendor/github.com/open-policy-agent/opa/v1/util/hashmap.go @@ -0,0 +1,271 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package util + +import ( + "fmt" + "strings" +) + +// T is a concise way to refer to T. +type T interface{} + +type Hasher interface { + Hash() int +} + +type hashEntry[K any, V any] struct { + k K + v V + next *hashEntry[K, V] +} + +// TypedHashMap represents a key/value map. +type TypedHashMap[K any, V any] struct { + keq func(K, K) bool + veq func(V, V) bool + khash func(K) int + vhash func(V) int + def V + table map[int]*hashEntry[K, V] + size int +} + +// NewTypedHashMap returns a new empty TypedHashMap. +func NewTypedHashMap[K any, V any](keq func(K, K) bool, veq func(V, V) bool, khash func(K) int, vhash func(V) int, def V) *TypedHashMap[K, V] { + return &TypedHashMap[K, V]{ + keq: keq, + veq: veq, + khash: khash, + vhash: vhash, + def: def, + table: make(map[int]*hashEntry[K, V]), + size: 0, + } +} + +// HashMap represents a key/value map. +type HashMap = TypedHashMap[T, T] + +// NewHashMap returns a new empty HashMap. +func NewHashMap(eq func(T, T) bool, hash func(T) int) *HashMap { + return &HashMap{ + keq: eq, + veq: eq, + khash: hash, + vhash: hash, + def: nil, + table: make(map[int]*hashEntry[T, T]), + size: 0, + } +} + +// Copy returns a shallow copy of this HashMap. +func (h *TypedHashMap[K, V]) Copy() *TypedHashMap[K, V] { + cpy := NewTypedHashMap[K, V](h.keq, h.veq, h.khash, h.vhash, h.def) + h.Iter(func(k K, v V) bool { + cpy.Put(k, v) + return false + }) + return cpy +} + +// Equal returns true if this HashMap equals the other HashMap. +// Two hash maps are equal if they contain the same key/value pairs. +func (h *TypedHashMap[K, V]) Equal(other *TypedHashMap[K, V]) bool { + if h.Len() != other.Len() { + return false + } + return !h.Iter(func(k K, v V) bool { + ov, ok := other.Get(k) + if !ok { + return true + } + return !h.veq(v, ov) + }) +} + +// Get returns the value for k. +func (h *TypedHashMap[K, V]) Get(k K) (V, bool) { + hash := h.khash(k) + for entry := h.table[hash]; entry != nil; entry = entry.next { + if h.keq(entry.k, k) { + return entry.v, true + } + } + return h.def, false +} + +// Delete removes the key k. +func (h *TypedHashMap[K, V]) Delete(k K) { + hash := h.khash(k) + var prev *hashEntry[K, V] + for entry := h.table[hash]; entry != nil; entry = entry.next { + if h.keq(entry.k, k) { + if prev != nil { + prev.next = entry.next + } else { + h.table[hash] = entry.next + } + h.size-- + return + } + prev = entry + } +} + +// Hash returns the hash code for this hash map. +func (h *TypedHashMap[K, V]) Hash() int { + var hash int + h.Iter(func(k K, v V) bool { + hash += h.khash(k) + h.vhash(v) + return false + }) + return hash +} + +// Iter invokes the iter function for each element in the HashMap. +// If the iter function returns true, iteration stops and the return value is true. +// If the iter function never returns true, iteration proceeds through all elements +// and the return value is false. +func (h *TypedHashMap[K, V]) Iter(iter func(K, V) bool) bool { + for _, entry := range h.table { + for ; entry != nil; entry = entry.next { + if iter(entry.k, entry.v) { + return true + } + } + } + return false +} + +// Len returns the current size of this HashMap. +func (h *TypedHashMap[K, V]) Len() int { + return h.size +} + +// Put inserts a key/value pair into this HashMap. 
If the key is already present, the existing +// value is overwritten. +func (h *TypedHashMap[K, V]) Put(k K, v V) { + hash := h.khash(k) + head := h.table[hash] + for entry := head; entry != nil; entry = entry.next { + if h.keq(entry.k, k) { + entry.v = v + return + } + } + h.table[hash] = &hashEntry[K, V]{k: k, v: v, next: head} + h.size++ +} + +func (h *TypedHashMap[K, V]) String() string { + var buf []string + h.Iter(func(k K, v V) bool { + buf = append(buf, fmt.Sprintf("%v: %v", k, v)) + return false + }) + return "{" + strings.Join(buf, ", ") + "}" +} + +// Update returns a new HashMap with elements from the other HashMap put into this HashMap. +// If the other HashMap contains elements with the same key as this HashMap, the value +// from the other HashMap overwrites the value from this HashMap. +func (h *TypedHashMap[K, V]) Update(other *TypedHashMap[K, V]) *TypedHashMap[K, V] { + updated := h.Copy() + other.Iter(func(k K, v V) bool { + updated.Put(k, v) + return false + }) + return updated +} + +type hasherEntry[K Hasher, V any] struct { + k K + v V + next *hasherEntry[K, V] +} + +// HasherMap represents a simpler version of TypedHashMap that uses Hasher's +// for keys, and requires only an equality function for keys. Ideally we'd have +// and Equal method for all key types too, and we could get rid of that requirement. +type HasherMap[K Hasher, V any] struct { + keq func(K, K) bool + table map[int]*hasherEntry[K, V] + size int +} + +// NewHasherMap returns a new empty HasherMap. +func NewHasherMap[K Hasher, V any](keq func(K, K) bool) *HasherMap[K, V] { + return &HasherMap[K, V]{ + keq: keq, + table: make(map[int]*hasherEntry[K, V]), + size: 0, + } +} + +// Get returns the value for k. +func (h *HasherMap[K, V]) Get(k K) (V, bool) { + for entry := h.table[k.Hash()]; entry != nil; entry = entry.next { + if h.keq(entry.k, k) { + return entry.v, true + } + } + var zero V + return zero, false +} + +// Put inserts a key/value pair into this HashMap. If the key is already present, the existing +// value is overwritten. +func (h *HasherMap[K, V]) Put(k K, v V) { + hash := k.Hash() + head := h.table[hash] + for entry := head; entry != nil; entry = entry.next { + if h.keq(entry.k, k) { + entry.v = v + return + } + } + h.table[hash] = &hasherEntry[K, V]{k: k, v: v, next: head} + h.size++ +} + +// Delete removes the key k. +func (h *HasherMap[K, V]) Delete(k K) { + hash := k.Hash() + var prev *hasherEntry[K, V] + for entry := h.table[hash]; entry != nil; entry = entry.next { + if h.keq(entry.k, k) { + if prev != nil { + prev.next = entry.next + } else { + h.table[hash] = entry.next + } + h.size-- + return + } + prev = entry + } +} + +// Iter invokes the iter function for each element in the HasherMap. +// If the iter function returns true, iteration stops and the return value is true. +// If the iter function never returns true, iteration proceeds through all elements +// and the return value is false. +func (h *HasherMap[K, V]) Iter(iter func(K, V) bool) bool { + for _, entry := range h.table { + for ; entry != nil; entry = entry.next { + if iter(entry.k, entry.v) { + return true + } + } + } + return false +} + +// Len returns the current size of this HashMap. 
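HasherMap above is the lighter sibling of TypedHashMap: keys bring their own Hash() int via the Hasher interface, so the map needs only a key-equality function. A usage sketch with a hypothetical key type:

    package main

    import (
        "fmt"

        "github.com/open-policy-agent/opa/v1/util"
    )

    // point is a hypothetical key type satisfying util.Hasher.
    type point struct{ x, y int }

    func (p point) Hash() int { return p.x*31 + p.y }

    func main() {
        m := util.NewHasherMap[point, string](func(a, b point) bool { return a == b })

        m.Put(point{1, 2}, "first")
        m.Put(point{1, 2}, "second") // same key: value replaced, size unchanged

        v, ok := m.Get(point{1, 2})
        fmt.Println(v, ok, m.Len()) // second true 1
    }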
+func (h *HasherMap[K, V]) Len() int { + return h.size +} diff --git a/vendor/github.com/open-policy-agent/opa/util/json.go b/vendor/github.com/open-policy-agent/opa/v1/util/json.go similarity index 98% rename from vendor/github.com/open-policy-agent/opa/util/json.go rename to vendor/github.com/open-policy-agent/opa/v1/util/json.go index 0b7fd2ed64c..5a4e460b61a 100644 --- a/vendor/github.com/open-policy-agent/opa/util/json.go +++ b/vendor/github.com/open-policy-agent/opa/v1/util/json.go @@ -13,7 +13,7 @@ import ( "sigs.k8s.io/yaml" - "github.com/open-policy-agent/opa/loader/extension" + "github.com/open-policy-agent/opa/v1/loader/extension" ) // UnmarshalJSON parses the JSON encoded data and stores the result in the value diff --git a/vendor/github.com/open-policy-agent/opa/v1/util/maps.go b/vendor/github.com/open-policy-agent/opa/v1/util/maps.go new file mode 100644 index 00000000000..c56fbe98ac8 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/util/maps.go @@ -0,0 +1,34 @@ +package util + +import ( + "cmp" + "slices" +) + +// Keys returns a slice of keys from any map. +func Keys[M ~map[K]V, K comparable, V any](m M) []K { + r := make([]K, 0, len(m)) + for k := range m { + r = append(r, k) + } + return r +} + +// KeysSorted returns a slice of keys from any map, sorted in ascending order. +func KeysSorted[M ~map[K]V, K cmp.Ordered, V any](m M) []K { + r := make([]K, 0, len(m)) + for k := range m { + r = append(r, k) + } + slices.Sort(r) + return r +} + +// Values returns a slice of values from any map. Copied from golang.org/x/exp/maps. +func Values[M ~map[K]V, K comparable, V any](m M) []V { + r := make([]V, 0, len(m)) + for _, v := range m { + r = append(r, v) + } + return r +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/util/performance.go b/vendor/github.com/open-policy-agent/opa/v1/util/performance.go new file mode 100644 index 00000000000..467fe766bbc --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/util/performance.go @@ -0,0 +1,64 @@ +package util + +import ( + "math" + "slices" + "unsafe" +) + +// NewPtrSlice returns a slice of pointers to T with length n, +// with only 2 allocations performed no matter the size of n. +// See: +// https://gist.github.com/CAFxX/e96e8a5c3841d152f16d266a1fe7f8bd#slices-of-pointers +func NewPtrSlice[T any](n int) []*T { + return GrowPtrSlice[T](nil, n) +} + +// GrowPtrSlice appends n elements to the slice, each pointing to +// a newly-allocated T. The resulting slice has length equal to len(s)+n. +// +// It performs at most 2 allocations, regardless of n. +func GrowPtrSlice[T any](s []*T, n int) []*T { + s = slices.Grow(s, n) + p := make([]T, n) + for i := range n { + s = append(s, &p[i]) + } + return s +} + +// Allocation free conversion from []byte to string (unsafe) +// Note that the byte slice must not be modified after conversion +func ByteSliceToString(bs []byte) string { + return unsafe.String(unsafe.SliceData(bs), len(bs)) +} + +// Allocation free conversion from ~string to []byte (unsafe) +// Note that the byte slice must not be modified after conversion +func StringToByteSlice[T ~string](s T) []byte { + return unsafe.Slice(unsafe.StringData(string(s)), len(s)) +} + +// NumDigitsInt returns the number of digits in n. +// This is useful for pre-allocating buffers for string conversion. +func NumDigitsInt(n int) int { + if n == 0 { + return 1 + } + + if n < 0 { + n = -n + } + + return int(math.Log10(float64(n))) + 1 +} + +// NumDigitsUint returns the number of digits in n. 
+// This is useful for pre-allocating buffers for string conversion. +func NumDigitsUint(n uint64) int { + if n == 0 { + return 1 + } + + return int(math.Log10(float64(n))) + 1 +} diff --git a/vendor/github.com/open-policy-agent/opa/util/queue.go b/vendor/github.com/open-policy-agent/opa/v1/util/queue.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/util/queue.go rename to vendor/github.com/open-policy-agent/opa/v1/util/queue.go diff --git a/vendor/github.com/open-policy-agent/opa/util/read_gzip_body.go b/vendor/github.com/open-policy-agent/opa/v1/util/read_gzip_body.go similarity index 95% rename from vendor/github.com/open-policy-agent/opa/util/read_gzip_body.go rename to vendor/github.com/open-policy-agent/opa/v1/util/read_gzip_body.go index 217638b363f..b979d0bd0f2 100644 --- a/vendor/github.com/open-policy-agent/opa/util/read_gzip_body.go +++ b/vendor/github.com/open-policy-agent/opa/v1/util/read_gzip_body.go @@ -4,13 +4,13 @@ import ( "bytes" "compress/gzip" "encoding/binary" - "fmt" + "errors" "io" "net/http" "strings" "sync" - "github.com/open-policy-agent/opa/util/decoding" + "github.com/open-policy-agent/opa/v1/util/decoding" ) var gzipReaderPool = sync.Pool{ @@ -58,7 +58,7 @@ func ReadMaybeCompressedBody(r *http.Request) ([]byte, error) { // earlier in DecodingLimitHandler. sizeTrailerField := binary.LittleEndian.Uint32(content.Bytes()[content.Len()-4:]) if sizeTrailerField > uint32(gzipMaxLength) { - return content.Bytes(), fmt.Errorf("gzip payload too large") + return content.Bytes(), errors.New("gzip payload too large") } // Pull a gzip decompressor from the pool, and assign it to the current // buffer, using Reset(). Later, return it back to the pool for another diff --git a/vendor/github.com/open-policy-agent/opa/util/time.go b/vendor/github.com/open-policy-agent/opa/v1/util/time.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/util/time.go rename to vendor/github.com/open-policy-agent/opa/v1/util/time.go diff --git a/vendor/github.com/open-policy-agent/opa/util/wait.go b/vendor/github.com/open-policy-agent/opa/v1/util/wait.go similarity index 94% rename from vendor/github.com/open-policy-agent/opa/util/wait.go rename to vendor/github.com/open-policy-agent/opa/v1/util/wait.go index b70ab6fcf90..b1ea84fd532 100644 --- a/vendor/github.com/open-policy-agent/opa/util/wait.go +++ b/vendor/github.com/open-policy-agent/opa/v1/util/wait.go @@ -5,7 +5,7 @@ package util import ( - "fmt" + "errors" "time" ) @@ -24,7 +24,7 @@ func WaitFunc(fun func() bool, interval, timeout time.Duration) error { for { select { case <-timer.C: - return fmt.Errorf("timeout") + return errors.New("timeout") case <-ticker.C: if fun() { return nil diff --git a/vendor/github.com/open-policy-agent/opa/version/version.go b/vendor/github.com/open-policy-agent/opa/v1/version/version.go similarity index 95% rename from vendor/github.com/open-policy-agent/opa/version/version.go rename to vendor/github.com/open-policy-agent/opa/v1/version/version.go index 862556bce0d..f8795429fcf 100644 --- a/vendor/github.com/open-policy-agent/opa/version/version.go +++ b/vendor/github.com/open-policy-agent/opa/v1/version/version.go @@ -11,7 +11,7 @@ import ( ) // Version is the canonical version of OPA. 
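A quick aside on the performance.go helpers a little further up: ByteSliceToString and StringToByteSlice are zero-copy casts, so the immutability Go normally guarantees for strings becomes the caller's responsibility, exactly as their doc comments warn. A sketch of the sharp edge:

    package main

    import (
        "fmt"

        "github.com/open-policy-agent/opa/v1/util"
    )

    func main() {
        bs := []byte("immutable?")
        s := util.ByteSliceToString(bs) // no allocation: s aliases bs

        fmt.Println(s) // immutable?

        // Mutating bs now silently changes s too, which is why the doc
        // comment forbids modifying the slice after conversion.
        bs[0] = 'I'
        fmt.Println(s) // Immutable?
    }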
-var Version = "0.70.0" +var Version = "1.4.2" // GoVersion is the version of Go this was built with var GoVersion = runtime.Version() @@ -44,6 +44,6 @@ func init() { } } if dirty { - Vcs = Vcs + "-dirty" + Vcs += "-dirty" } } diff --git a/vendor/github.com/open-policy-agent/opa/version/wasm.go b/vendor/github.com/open-policy-agent/opa/v1/version/wasm.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/version/wasm.go rename to vendor/github.com/open-policy-agent/opa/v1/version/wasm.go diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 68ffe3c2480..ad347113c04 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -189,12 +189,15 @@ func (d *Desc) String() string { fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), ) } - vlStrings := make([]string, 0, len(d.variableLabels.names)) - for _, vl := range d.variableLabels.names { - if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil { - vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl)) - } else { - vlStrings = append(vlStrings, vl) + vlStrings := []string{} + if d.variableLabels != nil { + vlStrings = make([]string, 0, len(d.variableLabels.names)) + for _, vl := range d.variableLabels.names { + if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil { + vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl)) + } else { + vlStrings = append(vlStrings, vl) + } } } return fmt.Sprintf( diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go index 51174641729..6b8684731c9 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -288,7 +288,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { } func attachOriginalName(desc, origName string) string { - return fmt.Sprintf("%s Sourced from %s", desc, origName) + return fmt.Sprintf("%s Sourced from %s.", desc, origName) } // Describe returns all descriptions of the collector. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 519db348a74..c453b754a7f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -14,6 +14,7 @@ package prometheus import ( + "errors" "fmt" "math" "runtime" @@ -28,6 +29,11 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" ) +const ( + nativeHistogramSchemaMaximum = 8 + nativeHistogramSchemaMinimum = -4 +) + // nativeHistogramBounds for the frac of observed values. Only relevant for // schema > 0. The position in the slice is the schema. (0 is never used, just // here for convenience of using the schema directly as the index.) @@ -330,11 +336,11 @@ func ExponentialBuckets(start, factor float64, count int) []float64 { // used for the Buckets field of HistogramOpts. // // The function panics if 'count' is 0 or negative, if 'min' is 0 or negative. 
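// For example (illustrative): ExponentialBucketsRange(1, 100, 3) returns {1, 10, 100},
// since the growth factor is (100/1)^(1/(3-1)) = 10.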
-func ExponentialBucketsRange(min, max float64, count int) []float64 { +func ExponentialBucketsRange(minBucket, maxBucket float64, count int) []float64 { if count < 1 { panic("ExponentialBucketsRange count needs a positive count") } - if min <= 0 { + if minBucket <= 0 { panic("ExponentialBucketsRange min needs to be greater than 0") } @@ -342,12 +348,12 @@ func ExponentialBucketsRange(min, max float64, count int) []float64 { // max = min*growthFactor^(bucketCount-1) // We know max/min and highest bucket. Solve for growthFactor. - growthFactor := math.Pow(max/min, 1.0/float64(count-1)) + growthFactor := math.Pow(maxBucket/minBucket, 1.0/float64(count-1)) // Now that we know growthFactor, solve for each bucket. buckets := make([]float64, count) for i := 1; i <= count; i++ { - buckets[i-1] = min * math.Pow(growthFactor, float64(i-1)) + buckets[i-1] = minBucket * math.Pow(growthFactor, float64(i-1)) } return buckets } @@ -858,15 +864,35 @@ func (h *histogram) Write(out *dto.Metric) error { // findBucket returns the index of the bucket for the provided value, or // len(h.upperBounds) for the +Inf bucket. func (h *histogram) findBucket(v float64) int { - // TODO(beorn7): For small numbers of buckets (<30), a linear search is - // slightly faster than the binary search. If we really care, we could - // switch from one search strategy to the other depending on the number - // of buckets. - // - // Microbenchmarks (BenchmarkHistogramNoLabels): - // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op - // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op - // 300 buckets: 154 ns/op linear - binary 61.6 ns/op + n := len(h.upperBounds) + if n == 0 { + return 0 + } + + // Early exit: if v is less than or equal to the first upper bound, return 0 + if v <= h.upperBounds[0] { + return 0 + } + + // Early exit: if v is greater than the last upper bound, return len(h.upperBounds) + if v > h.upperBounds[n-1] { + return n + } + + // For small arrays, use simple linear search + // "magic number" 35 is result of tests on couple different (AWS and baremetal) servers + // see more details here: https://github.com/prometheus/client_golang/pull/1662 + if n < 35 { + for i, bound := range h.upperBounds { + if v <= bound { + return i + } + } + // If v is greater than all upper bounds, return len(h.upperBounds) + return n + } + + // For larger arrays, use stdlib's binary search return sort.SearchFloat64s(h.upperBounds, v) } @@ -1440,9 +1466,9 @@ func pickSchema(bucketFactor float64) int32 { floor := math.Floor(math.Log2(math.Log2(bucketFactor))) switch { case floor <= -8: - return 8 + return nativeHistogramSchemaMaximum case floor >= 4: - return -4 + return nativeHistogramSchemaMinimum default: return -int32(floor) } @@ -1835,3 +1861,196 @@ func (n *nativeExemplars) addExemplar(e *dto.Exemplar) { n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...) } } + +type constNativeHistogram struct { + desc *Desc + dto.Histogram + labelPairs []*dto.LabelPair +} + +func validateCount(sum float64, count uint64, negativeBuckets, positiveBuckets map[int]int64, zeroBucket uint64) error { + var bucketPopulationSum int64 + for _, v := range positiveBuckets { + bucketPopulationSum += v + } + for _, v := range negativeBuckets { + bucketPopulationSum += v + } + bucketPopulationSum += int64(zeroBucket) + + // If the sum of observations is NaN, the number of observations must be greater or equal to the sum of all bucket counts. 
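+	// (Our reading of the check below: NaN observations increment the count but
+	// land in no bucket, which is why the bucket populations may undercount then.)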
+ // Otherwise, the number of observations must be equal to the sum of all bucket counts . + + if math.IsNaN(sum) && bucketPopulationSum > int64(count) || + !math.IsNaN(sum) && bucketPopulationSum != int64(count) { + return errors.New("the sum of all bucket populations exceeds the count of observations") + } + return nil +} + +// NewConstNativeHistogram returns a metric representing a Prometheus native histogram with +// fixed values for the count, sum, and positive/negative/zero bucket counts. As those parameters +// cannot be changed, the returned value does not implement the Histogram +// interface (but only the Metric interface). Users of this package will not +// have much use for it in regular operations. However, when implementing custom +// OpenTelemetry Collectors, it is useful as a throw-away metric that is generated on the fly +// to send it to Prometheus in the Collect method. +// +// zeroBucket counts all (positive and negative) +// observations in the zero bucket (with an absolute value less or equal +// the current threshold). +// positiveBuckets and negativeBuckets are separate maps for negative and positive +// observations. The map's value is an int64, counting observations in +// that bucket. The map's key is the +// index of the bucket according to the used +// Schema. Index 0 is for an upper bound of 1 in positive buckets and for a lower bound of -1 in negative buckets. +// NewConstNativeHistogram returns an error if +// - the length of labelValues is not consistent with the variable labels in Desc or if Desc is invalid. +// - the schema passed is not between 8 and -4 +// - the sum of counts in all buckets including the zero bucket does not equal the count if sum is not NaN (or exceeds the count if sum is NaN) +// +// See https://opentelemetry.io/docs/specs/otel/compatibility/prometheus_and_openmetrics/#exponential-histograms for more details about the conversion from OTel to Prometheus. 
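+//
+// A minimal usage sketch (illustrative values; desc is assumed to be a *Desc
+// with no variable labels):
+//
+//	m, err := NewConstNativeHistogram(desc, 10, 45.2,
+//	    map[int]int64{0: 4, 1: 6}, nil, 0, 3, 1e-128, time.Now())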
+func NewConstNativeHistogram( + desc *Desc, + count uint64, + sum float64, + positiveBuckets, negativeBuckets map[int]int64, + zeroBucket uint64, + schema int32, + zeroThreshold float64, + createdTimestamp time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + if schema > nativeHistogramSchemaMaximum || schema < nativeHistogramSchemaMinimum { + return nil, errors.New("invalid native histogram schema") + } + if err := validateCount(sum, count, negativeBuckets, positiveBuckets, zeroBucket); err != nil { + return nil, err + } + + NegativeSpan, NegativeDelta := makeBucketsFromMap(negativeBuckets) + PositiveSpan, PositiveDelta := makeBucketsFromMap(positiveBuckets) + ret := &constNativeHistogram{ + desc: desc, + Histogram: dto.Histogram{ + CreatedTimestamp: timestamppb.New(createdTimestamp), + Schema: &schema, + ZeroThreshold: &zeroThreshold, + SampleCount: &count, + SampleSum: &sum, + + NegativeSpan: NegativeSpan, + NegativeDelta: NegativeDelta, + + PositiveSpan: PositiveSpan, + PositiveDelta: PositiveDelta, + + ZeroCount: proto.Uint64(zeroBucket), + }, + labelPairs: MakeLabelPairs(desc, labelValues), + } + if *ret.ZeroThreshold == 0 && *ret.ZeroCount == 0 && len(ret.PositiveSpan) == 0 && len(ret.NegativeSpan) == 0 { + ret.PositiveSpan = []*dto.BucketSpan{{ + Offset: proto.Int32(0), + Length: proto.Uint32(0), + }} + } + return ret, nil +} + +// MustNewConstNativeHistogram is a version of NewConstNativeHistogram that panics where +// NewConstNativeHistogram would have returned an error. +func MustNewConstNativeHistogram( + desc *Desc, + count uint64, + sum float64, + positiveBuckets, negativeBuckets map[int]int64, + zeroBucket uint64, + nativeHistogramSchema int32, + nativeHistogramZeroThreshold float64, + createdTimestamp time.Time, + labelValues ...string, +) Metric { + nativehistogram, err := NewConstNativeHistogram(desc, + count, + sum, + positiveBuckets, + negativeBuckets, + zeroBucket, + nativeHistogramSchema, + nativeHistogramZeroThreshold, + createdTimestamp, + labelValues...) + if err != nil { + panic(err) + } + return nativehistogram +} + +func (h *constNativeHistogram) Desc() *Desc { + return h.desc +} + +func (h *constNativeHistogram) Write(out *dto.Metric) error { + out.Histogram = &h.Histogram + out.Label = h.labelPairs + return nil +} + +func makeBucketsFromMap(buckets map[int]int64) ([]*dto.BucketSpan, []int64) { + if len(buckets) == 0 { + return nil, nil + } + var ii []int + for k := range buckets { + ii = append(ii, k) + } + sort.Ints(ii) + + var ( + spans []*dto.BucketSpan + deltas []int64 + prevCount int64 + nextI int + ) + + appendDelta := func(count int64) { + *spans[len(spans)-1].Length++ + deltas = append(deltas, count-prevCount) + prevCount = count + } + + for n, i := range ii { + count := buckets[i] + // Multiple spans with only small gaps in between are probably + // encoded more efficiently as one larger span with a few empty + // buckets. Needs some research to find the sweet spot. For now, + // we assume that gaps of one or two buckets should not create + // a new span. + iDelta := int32(i - nextI) + if n == 0 || iDelta > 2 { + // We have to create a new span, either because we are + // at the very beginning, or because we have found a gap + // of more than two buckets. 
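+			// Worked example (illustrative): buckets {0: 1, 1: 2, 5: 3} encode as
+			// spans [{offset 0, length 2}, {offset 3, length 1}] with deltas [1, 1, 1];
+			// the jump from index 1 to index 5 is a gap wider than two, hence the second span.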
+ spans = append(spans, &dto.BucketSpan{ + Offset: proto.Int32(iDelta), + Length: proto.Uint32(0), + }) + } else { + // We have found a small gap (or no gap at all). + // Insert empty buckets as needed. + for j := int32(0); j < iDelta; j++ { + appendDelta(0) + } + } + appendDelta(count) + nextI = i + 1 + } + return spans, deltas +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go index a595a203625..8b016355adb 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go @@ -22,17 +22,18 @@ import ( "bytes" "fmt" "io" + "strconv" "strings" ) -func min(a, b int) int { +func minInt(a, b int) int { if a < b { return a } return b } -func max(a, b int) int { +func maxInt(a, b int) int { if a > b { return a } @@ -427,12 +428,12 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { if codes[0].Tag == 'e' { c := codes[0] i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} + codes[0] = OpCode{c.Tag, maxInt(i1, i2-n), i2, maxInt(j1, j2-n), j2} } if codes[len(codes)-1].Tag == 'e' { c := codes[len(codes)-1] i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} + codes[len(codes)-1] = OpCode{c.Tag, i1, minInt(i2, i1+n), j1, minInt(j2, j1+n)} } nn := n + n groups := [][]OpCode{} @@ -443,12 +444,12 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { // there is a large range with no changes. if c.Tag == 'e' && i2-i1 > nn { group = append(group, OpCode{ - c.Tag, i1, min(i2, i1+n), - j1, min(j2, j1+n), + c.Tag, i1, minInt(i2, i1+n), + j1, minInt(j2, j1+n), }) groups = append(groups, group) group = []OpCode{} - i1, j1 = max(i1, i2-n), max(j1, j2-n) + i1, j1 = maxInt(i1, i2-n), maxInt(j1, j2-n) } group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) } @@ -515,7 +516,7 @@ func (m *SequenceMatcher) QuickRatio() float64 { // is faster to compute than either .Ratio() or .QuickRatio(). func (m *SequenceMatcher) RealQuickRatio() float64 { la, lb := len(m.a), len(m.b) - return calculateRatio(min(la, lb), la+lb) + return calculateRatio(minInt(la, lb), la+lb) } // Convert range to the "ed" format @@ -524,7 +525,7 @@ func formatRangeUnified(start, stop int) string { beginning := start + 1 // lines start numbering with one length := stop - start if length == 1 { - return fmt.Sprintf("%d", beginning) + return strconv.Itoa(beginning) } if length == 0 { beginning-- // empty ranges begin at line just before the range diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go index 97d17d6cb60..f7f97ef9262 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go @@ -66,7 +66,8 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) name += "_total" } - valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name)) + // Our current conversion moves to legacy naming, so use legacy validation. 
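+	// (Legacy-valid names match [a-zA-Z_:][a-zA-Z0-9_:]*; names outside that set
+	// would need the newer UTF-8 name support instead.)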
+ valid := model.IsValidLegacyMetricName(namespace + "_" + subsystem + "_" + name) switch d.Kind { case metrics.KindUint64: case metrics.KindFloat64: diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index 9d9b81ab448..592eec3e24f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -108,15 +108,23 @@ func BuildFQName(namespace, subsystem, name string) string { if name == "" { return "" } - switch { - case namespace != "" && subsystem != "": - return strings.Join([]string{namespace, subsystem, name}, "_") - case namespace != "": - return strings.Join([]string{namespace, name}, "_") - case subsystem != "": - return strings.Join([]string{subsystem, name}, "_") + + sb := strings.Builder{} + sb.Grow(len(namespace) + len(subsystem) + len(name) + 2) + + if namespace != "" { + sb.WriteString(namespace) + sb.WriteString("_") } - return name + + if subsystem != "" { + sb.WriteString(subsystem) + sb.WriteString("_") + } + + sb.WriteString(name) + + return sb.String() } type invalidMetric struct { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 62a4e7ad9a0..e7bce8b58ec 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -23,6 +23,7 @@ import ( type processCollector struct { collectFn func(chan<- Metric) + describeFn func(chan<- *Desc) pidFn func() (int, error) reportErrors bool cpuTotal *Desc @@ -122,26 +123,23 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector { // Set up process metric collection if supported by the runtime. if canCollectProcess() { c.collectFn = c.processCollect + c.describeFn = c.describe } else { - c.collectFn = func(ch chan<- Metric) { - c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) - } + c.collectFn = c.errorCollectFn + c.describeFn = c.errorDescribeFn } return c } -// Describe returns all descriptions of the collector. -func (c *processCollector) Describe(ch chan<- *Desc) { - ch <- c.cpuTotal - ch <- c.openFDs - ch <- c.maxFDs - ch <- c.vsize - ch <- c.maxVsize - ch <- c.rss - ch <- c.startTime - ch <- c.inBytes - ch <- c.outBytes +func (c *processCollector) errorCollectFn(ch chan<- Metric) { + c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) +} + +func (c *processCollector) errorDescribeFn(ch chan<- *Desc) { + if c.reportErrors { + ch <- NewInvalidDesc(errors.New("process metrics not supported on this platform")) + } } // Collect returns the current state of all metrics of the collector. @@ -149,6 +147,11 @@ func (c *processCollector) Collect(ch chan<- Metric) { c.collectFn(ch) } +// Describe returns all descriptions of the collector. 
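+// It delegates to describeFn, which NewProcessCollector wires to either the
+// platform-specific describe method or the errorDescribeFn stub.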
+func (c *processCollector) Describe(ch chan<- *Desc) { + c.describeFn(ch) +} + func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { if !c.reportErrors { return diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go new file mode 100644 index 00000000000..0a61b984613 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go @@ -0,0 +1,130 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build darwin && !ios + +package prometheus + +import ( + "errors" + "fmt" + "os" + "syscall" + "time" + + "golang.org/x/sys/unix" +) + +// notImplementedErr is returned by stub functions that replace cgo functions, when cgo +// isn't available. +var notImplementedErr = errors.New("not implemented") + +type memoryInfo struct { + vsize uint64 // Virtual memory size in bytes + rss uint64 // Resident memory size in bytes +} + +func canCollectProcess() bool { + return true +} + +func getSoftLimit(which int) (uint64, error) { + rlimit := syscall.Rlimit{} + + if err := syscall.Getrlimit(which, &rlimit); err != nil { + return 0, err + } + + return rlimit.Cur, nil +} + +func getOpenFileCount() (float64, error) { + // Alternately, the undocumented proc_pidinfo(PROC_PIDLISTFDS) can be used to + // return a list of open fds, but that requires a way to call C APIs. The + // benefits, however, include fewer system calls and not failing when at the + // open file soft limit. + + if dir, err := os.Open("/dev/fd"); err != nil { + return 0.0, err + } else { + defer dir.Close() + + // Avoid ReadDir(), as it calls stat(2) on each descriptor. Not only is + // that info not used, but KQUEUE descriptors fail stat(2), which causes + // the whole method to fail. + if names, err := dir.Readdirnames(0); err != nil { + return 0.0, err + } else { + // Subtract 1 to ignore the open /dev/fd descriptor above. + return float64(len(names) - 1), nil + } + } +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + if procs, err := unix.SysctlKinfoProcSlice("kern.proc.pid", os.Getpid()); err == nil { + if len(procs) == 1 { + startTime := float64(procs[0].Proc.P_starttime.Nano() / 1e9) + ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) + } else { + err = fmt.Errorf("sysctl() returned %d proc structs (expected 1)", len(procs)) + c.reportError(ch, c.startTime, err) + } + } else { + c.reportError(ch, c.startTime, err) + } + + // The proc structure returned by kern.proc.pid above has an Rusage member, + // but it is not filled in, so it needs to be fetched by getrusage(2). For + // that call, the UTime, STime, and Maxrss members are filled out, but not + // Ixrss, Idrss, or Isrss for the memory usage. Memory stats will require + // access to the C API to call task_info(TASK_BASIC_INFO). 
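+	// (When built with cgo, the get_memory_info helper in
+	// process_collector_mem_cgo_darwin.c makes that task_info call.)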
+ rusage := unix.Rusage{} + + if err := unix.Getrusage(syscall.RUSAGE_SELF, &rusage); err == nil { + cpuTime := time.Duration(rusage.Stime.Nano() + rusage.Utime.Nano()).Seconds() + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, cpuTime) + } else { + c.reportError(ch, c.cpuTotal, err) + } + + if memInfo, err := getMemory(); err == nil { + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss)) + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize)) + } else if !errors.Is(err, notImplementedErr) { + // Don't report an error when support is not compiled in. + c.reportError(ch, c.rss, err) + c.reportError(ch, c.vsize, err) + } + + if fds, err := getOpenFileCount(); err == nil { + ch <- MustNewConstMetric(c.openFDs, GaugeValue, fds) + } else { + c.reportError(ch, c.openFDs, err) + } + + if openFiles, err := getSoftLimit(syscall.RLIMIT_NOFILE); err == nil { + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(openFiles)) + } else { + c.reportError(ch, c.maxFDs, err) + } + + if addressSpace, err := getSoftLimit(syscall.RLIMIT_AS); err == nil { + ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(addressSpace)) + } else { + c.reportError(ch, c.maxVsize, err) + } + + // TODO: socket(PF_SYSTEM) to fetch "com.apple.network.statistics" might + // be able to get the per-process network send/receive counts. +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c new file mode 100644 index 00000000000..d00a24315de --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c @@ -0,0 +1,84 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build darwin && !ios && cgo + +#include +#include +#include + +// The compiler warns that mach/shared_memory_server.h is deprecated, and to use +// mach/shared_region.h instead. But that doesn't define +// SHARED_DATA_REGION_SIZE or SHARED_TEXT_REGION_SIZE, so redefine them here and +// avoid a warning message when running tests. +#define GLOBAL_SHARED_TEXT_SEGMENT 0x90000000U +#define SHARED_DATA_REGION_SIZE 0x10000000 +#define SHARED_TEXT_REGION_SIZE 0x10000000 + + +int get_memory_info(unsigned long long *rss, unsigned long long *vsize) +{ + // This is lightly adapted from how ps(1) obtains its memory info. 
+ // https://github.com/apple-oss-distributions/adv_cmds/blob/8744084ea0ff41ca4bb96b0f9c22407d0e48e9b7/ps/tasks.c#L109 + + kern_return_t error; + task_t task = MACH_PORT_NULL; + mach_task_basic_info_data_t info; + mach_msg_type_number_t info_count = MACH_TASK_BASIC_INFO_COUNT; + + error = task_info( + mach_task_self(), + MACH_TASK_BASIC_INFO, + (task_info_t) &info, + &info_count ); + + if( error != KERN_SUCCESS ) + { + return error; + } + + *rss = info.resident_size; + *vsize = info.virtual_size; + + { + vm_region_basic_info_data_64_t b_info; + mach_vm_address_t address = GLOBAL_SHARED_TEXT_SEGMENT; + mach_vm_size_t size; + mach_port_t object_name; + + /* + * try to determine if this task has the split libraries + * mapped in... if so, adjust its virtual size down by + * the 2 segments that are used for split libraries + */ + info_count = VM_REGION_BASIC_INFO_COUNT_64; + + error = mach_vm_region( + mach_task_self(), + &address, + &size, + VM_REGION_BASIC_INFO_64, + (vm_region_info_t) &b_info, + &info_count, + &object_name); + + if (error == KERN_SUCCESS) { + if (b_info.reserved && size == (SHARED_TEXT_REGION_SIZE) && + *vsize > (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE)) { + *vsize -= (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE); + } + } + } + + return 0; +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go new file mode 100644 index 00000000000..9ac53f99925 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go @@ -0,0 +1,51 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build darwin && !ios && cgo + +package prometheus + +/* +int get_memory_info(unsigned long long *rss, unsigned long long *vs); +*/ +import "C" +import "fmt" + +func getMemory() (*memoryInfo, error) { + var rss, vsize C.ulonglong + + if err := C.get_memory_info(&rss, &vsize); err != 0 { + return nil, fmt.Errorf("task_info() failed with 0x%x", int(err)) + } + + return &memoryInfo{vsize: uint64(vsize), rss: uint64(rss)}, nil +} + +// describe returns all descriptions of the collector for Darwin. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. 
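+// With cgo available, memory stats can be read via task_info, so rss and vsize
+// are described (and collected) here; the nocgo variant below omits them.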
+func (c *processCollector) describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.maxVsize + ch <- c.startTime + ch <- c.rss + ch <- c.vsize + + /* the process could be collected but not implemented yet + ch <- c.inBytes + ch <- c.outBytes + */ +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go new file mode 100644 index 00000000000..8ddb0995d6a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go @@ -0,0 +1,39 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build darwin && !ios && !cgo + +package prometheus + +func getMemory() (*memoryInfo, error) { + return nil, notImplementedErr +} + +// describe returns all descriptions of the collector for Darwin. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. +func (c *processCollector) describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.maxVsize + ch <- c.startTime + + /* the process could be collected but not implemented yet + ch <- c.rss + ch <- c.vsize + ch <- c.inBytes + ch <- c.outBytes + */ +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go similarity index 56% rename from vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go rename to vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go index b1e363d6cf6..7732b7f3764 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright 2023 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build js -// +build js +//go:build wasip1 || js || ios +// +build wasip1 js ios package prometheus @@ -21,6 +21,13 @@ func canCollectProcess() bool { } func (c *processCollector) processCollect(ch chan<- Metric) { - // noop on this platform - return + c.errorCollectFn(ch) +} + +// describe returns all descriptions of the collector for wasip1 and js. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. 
Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. +func (c *processCollector) describe(ch chan<- *Desc) { + c.errorDescribeFn(ch) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go similarity index 77% rename from vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go rename to vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go index 14d56d2d068..9f4b130befa 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build !windows && !js && !wasip1 -// +build !windows,!js,!wasip1 +//go:build !windows && !js && !wasip1 && !darwin +// +build !windows,!js,!wasip1,!darwin package prometheus @@ -78,3 +78,19 @@ func (c *processCollector) processCollect(ch chan<- Metric) { c.reportError(ch, nil, err) } } + +// describe returns all descriptions of the collector for others than windows, js, wasip1 and darwin. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. +func (c *processCollector) describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.maxVsize + ch <- c.rss + ch <- c.startTime + ch <- c.inBytes + ch <- c.outBytes +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go deleted file mode 100644 index d8d9a6d7a2f..00000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build wasip1 -// +build wasip1 - -package prometheus - -func canCollectProcess() bool { - return false -} - -func (*processCollector) processCollect(chan<- Metric) { - // noop on this platform - return -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go index f973398df2d..fa474289ef8 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go @@ -79,14 +79,10 @@ func getProcessHandleCount(handle windows.Handle) (uint32, error) { } func (c *processCollector) processCollect(ch chan<- Metric) { - h, err := windows.GetCurrentProcess() - if err != nil { - c.reportError(ch, nil, err) - return - } + h := windows.CurrentProcess() var startTime, exitTime, kernelTime, userTime windows.Filetime - err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) + err := windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) if err != nil { c.reportError(ch, nil, err) return @@ -111,6 +107,19 @@ func (c *processCollector) processCollect(ch chan<- Metric) { ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process. } +// describe returns all descriptions of the collector for windows. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. +func (c *processCollector) describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.rss + ch <- c.startTime +} + func fileTimeToSeconds(ft windows.Filetime) float64 { return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7 } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index e598e66e688..28eed26727a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -207,7 +207,13 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO if encodingHeader != string(Identity) { rsp.Header().Set(contentEncodingHeader, encodingHeader) } - enc := expfmt.NewEncoder(w, contentType) + + var enc expfmt.Encoder + if opts.EnableOpenMetricsTextCreatedSamples { + enc = expfmt.NewEncoder(w, contentType, expfmt.WithCreatedLines()) + } else { + enc = expfmt.NewEncoder(w, contentType) + } // handleError handles the error according to opts.ErrorHandling // and returns true if we have to abort after the handling. @@ -408,6 +414,21 @@ type HandlerOpts struct { // (which changes the identity of the resulting series on the Prometheus // server). EnableOpenMetrics bool + // EnableOpenMetricsTextCreatedSamples specifies if this handler should add, extra, synthetic + // Created Timestamps for counters, histograms and summaries, which for the current + // version of OpenMetrics are defined as extra series with the same name and "_created" + // suffix. 
See also the OpenMetrics specification for more details + // https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1 + // + // Created timestamps are used to improve the accuracy of reset detection, + // but the way it's designed in OpenMetrics 1.0 it also dramatically increases cardinality + // if the scraper does not handle those metrics correctly (converting to created timestamp + // instead of leaving those series as-is). New OpenMetrics versions might improve + // this situation. + // + // Prometheus introduced the feature flag 'created-timestamp-zero-ingestion' + // in version 2.50.0 to handle this situation. + EnableOpenMetricsTextCreatedSamples bool // ProcessStartTime allows setting process start timevalue that will be exposed // with "Process-Start-Time-Unix" response header along with the metrics // payload. This allow callers to have efficient transformations to cumulative diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index 1ab0e479655..ac5203c6faa 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -243,6 +243,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { s := &summary{ desc: desc, + now: opts.now, objectives: opts.Objectives, sortedObjectives: make([]float64, 0, len(opts.Objectives)), @@ -280,6 +281,8 @@ type summary struct { desc *Desc + now func() time.Time + objectives map[float64]float64 sortedObjectives []float64 @@ -307,7 +310,7 @@ func (s *summary) Observe(v float64) { s.bufMtx.Lock() defer s.bufMtx.Unlock() - now := time.Now() + now := s.now() if now.After(s.hotBufExpTime) { s.asyncFlush(now) } @@ -326,7 +329,7 @@ func (s *summary) Write(out *dto.Metric) error { s.bufMtx.Lock() s.mtx.Lock() // Swap bufs even if hotBuf is empty to set new hotBufExpTime. - s.swapBufs(time.Now()) + s.swapBufs(s.now()) s.bufMtx.Unlock() s.flushColdBuf() diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 25cfaa21643..1448439b7f7 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -45,7 +45,7 @@ func ResponseFormat(h http.Header) Format { mediatype, params, err := mime.ParseMediaType(ct) if err != nil { - return fmtUnknown + return FmtUnknown } const textType = "text/plain" @@ -53,21 +53,21 @@ func ResponseFormat(h http.Header) Format { switch mediatype { case ProtoType: if p, ok := params["proto"]; ok && p != ProtoProtocol { - return fmtUnknown + return FmtUnknown } if e, ok := params["encoding"]; ok && e != "delimited" { - return fmtUnknown + return FmtUnknown } - return fmtProtoDelim + return FmtProtoDelim case textType: if v, ok := params["version"]; ok && v != TextVersion { - return fmtUnknown + return FmtUnknown } - return fmtText + return FmtText } - return fmtUnknown + return FmtUnknown } // NewDecoder returns a new decoder based on the given input format. 
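//
// Illustrative usage of the exported format constants (assumes an
// *http.Response named resp; a sketch, not part of the vendored source):
//
//	format := expfmt.ResponseFormat(resp.Header)
//	if format == expfmt.FmtUnknown {
//		// unrecognized content type; fall back or reject
//	}
//	dec := expfmt.NewDecoder(resp.Body, format)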
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index ff5ef7a9d92..d7f3d76f55d 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -68,7 +68,7 @@ func Negotiate(h http.Header) Format { if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { switch Format(escapeParam) { case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: - escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) + escapingScheme = Format("; escaping=" + escapeParam) default: // If the escaping parameter is unknown, ignore it. } @@ -77,18 +77,18 @@ func Negotiate(h http.Header) Format { if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return fmtProtoDelim + escapingScheme + return FmtProtoDelim + escapingScheme case "text": - return fmtProtoText + escapingScheme + return FmtProtoText + escapingScheme case "compact-text": - return fmtProtoCompact + escapingScheme + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return fmtText + escapingScheme + return FmtText + escapingScheme } } - return fmtText + escapingScheme + return FmtText + escapingScheme } // NegotiateIncludingOpenMetrics works like Negotiate but includes @@ -101,7 +101,7 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { switch Format(escapeParam) { case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: - escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) + escapingScheme = Format("; escaping=" + escapeParam) default: // If the escaping parameter is unknown, ignore it. } @@ -110,26 +110,26 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return fmtProtoDelim + escapingScheme + return FmtProtoDelim + escapingScheme case "text": - return fmtProtoText + escapingScheme + return FmtProtoText + escapingScheme case "compact-text": - return fmtProtoCompact + escapingScheme + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return fmtText + escapingScheme + return FmtText + escapingScheme } if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { switch ver { case OpenMetricsVersion_1_0_0: - return fmtOpenMetrics_1_0_0 + escapingScheme + return FmtOpenMetrics_1_0_0 + escapingScheme default: - return fmtOpenMetrics_0_0_1 + escapingScheme + return FmtOpenMetrics_0_0_1 + escapingScheme } } } - return fmtText + escapingScheme + return FmtText + escapingScheme } // NewEncoder returns a new encoder based on content type negotiation. 
All diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index 051b38cd178..b26886560d7 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -15,7 +15,7 @@ package expfmt import ( - "fmt" + "errors" "strings" "github.com/prometheus/common/model" @@ -32,24 +32,31 @@ type Format string // it on the wire, new content-type strings will have to be agreed upon and // added here. const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - protoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" OpenMetricsType = `application/openmetrics-text` OpenMetricsVersion_0_0_1 = "0.0.1" OpenMetricsVersion_1_0_0 = "1.0.0" - // The Content-Type values for the different wire protocols. Note that these - // values are now unexported. If code was relying on comparisons to these - // constants, instead use FormatType(). - fmtUnknown Format = `` - fmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - fmtProtoDelim Format = protoFmt + ` encoding=delimited` - fmtProtoText Format = protoFmt + ` encoding=text` - fmtProtoCompact Format = protoFmt + ` encoding=compact-text` - fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` - fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` + // The Content-Type values for the different wire protocols. Do not do direct + // comparisons to these constants, instead use the comparison functions. + // Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead. + FmtUnknown Format = `` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead. + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead. + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead. + FmtProtoText Format = ProtoFmt + ` encoding=text` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) const ( @@ -79,17 +86,17 @@ const ( func NewFormat(t FormatType) Format { switch t { case TypeProtoCompact: - return fmtProtoCompact + return FmtProtoCompact case TypeProtoDelim: - return fmtProtoDelim + return FmtProtoDelim case TypeProtoText: - return fmtProtoText + return FmtProtoText case TypeTextPlain: - return fmtText + return FmtText case TypeOpenMetrics: - return fmtOpenMetrics_1_0_0 + return FmtOpenMetrics_1_0_0 default: - return fmtUnknown + return FmtUnknown } } @@ -97,12 +104,35 @@ func NewFormat(t FormatType) Format { // specified version number. 
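// For example, NewOpenMetricsFormat("1.0.0") yields FmtOpenMetrics_1_0_0, while
// an unrecognized version string returns FmtUnknown and an error.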
func NewOpenMetricsFormat(version string) (Format, error) { if version == OpenMetricsVersion_0_0_1 { - return fmtOpenMetrics_0_0_1, nil + return FmtOpenMetrics_0_0_1, nil } if version == OpenMetricsVersion_1_0_0 { - return fmtOpenMetrics_1_0_0, nil + return FmtOpenMetrics_1_0_0, nil } - return fmtUnknown, fmt.Errorf("unknown open metrics version string") + return FmtUnknown, errors.New("unknown open metrics version string") +} + +// WithEscapingScheme returns a copy of Format with the specified escaping +// scheme appended to the end. If an escaping scheme already exists it is +// removed. +func (f Format) WithEscapingScheme(s model.EscapingScheme) Format { + var terms []string + for _, p := range strings.Split(string(f), ";") { + toks := strings.Split(p, "=") + if len(toks) != 2 { + trimmed := strings.TrimSpace(p) + if len(trimmed) > 0 { + terms = append(terms, trimmed) + } + continue + } + key := strings.TrimSpace(toks[0]) + if key != model.EscapingKey { + terms = append(terms, strings.TrimSpace(p)) + } + } + terms = append(terms, model.EscapingKey+"="+s.String()) + return Format(strings.Join(terms, "; ")) } // FormatType deduces an overall FormatType for the given format. diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 353c5e93f92..a21ed4ec1f8 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -38,7 +38,7 @@ type EncoderOption func(*encoderOption) // WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder // to include _created lines (See -// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#counter-1). +// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1). // Created timestamps can improve the accuracy of series reset detection, but // come with a bandwidth cost. // @@ -102,7 +102,7 @@ func WithUnit() EncoderOption { // // - According to the OM specs, the `# UNIT` line is optional, but if populated, // the unit has to be present in the metric name as its suffix: -// (see https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#unit). +// (see https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#unit). // However, in order to accommodate any potential scenario where such a change in the // metric name is not desirable, the users are here given the choice of either explicitly // opt in, in case they wish for the unit to be included in the output AND in the metric name @@ -152,8 +152,8 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") { compliantName = name[:len(name)-6] } - if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) { - compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit) + if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, "_"+*in.Unit) { + compliantName = compliantName + "_" + *in.Unit } // Comments, first HELP, then TYPE. @@ -477,7 +477,7 @@ func writeOpenMetricsNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces, quoted. 
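	// (Illustrative quoted form: a family named my.metric is written as
	// {"my.metric",code="200"} 42 instead of my.metric{code="200"} 42.)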
- if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + if !model.IsValidLegacyMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index f9b8265a9ec..4b86434b332 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -354,7 +354,7 @@ func writeNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces. - if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + if !model.IsValidLegacyMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) { // writeName writes a string as-is if it complies with the legacy naming // scheme, or escapes it in double quotes if not. func writeName(w enhancedWriter, name string) (int, error) { - if model.IsValidLegacyMetricName(model.LabelValue(name)) { + if model.IsValidLegacyMetricName(name) { return w.WriteString(name) } var written int diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 26490211af2..b4607fe4d27 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -22,9 +22,9 @@ import ( "math" "strconv" "strings" + "unicode/utf8" dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/proto" "github.com/prometheus/common/model" @@ -60,6 +60,7 @@ type TextParser struct { currentMF *dto.MetricFamily currentMetric *dto.Metric currentLabelPair *dto.LabelPair + currentLabelPairs []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line. // The remaining member variables are only used for summaries/histograms. currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' @@ -74,6 +75,9 @@ type TextParser struct { // count and sum of that summary/histogram. currentIsSummaryCount, currentIsSummarySum bool currentIsHistogramCount, currentIsHistogramSum bool + // These indicate if the metric name from the current line being parsed is inside + // braces and if that metric name was found respectively. + currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool } // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange @@ -137,12 +141,15 @@ func (p *TextParser) reset(in io.Reader) { } p.currentQuantile = math.NaN() p.currentBucket = math.NaN() + p.currentMF = nil } // startOfLine represents the state where the next byte read from p.buf is the // start of a line (or whitespace leading up to it). func (p *TextParser) startOfLine() stateFn { p.lineCount++ + p.currentMetricIsInsideBraces = false + p.currentMetricInsideBracesIsPresent = false if p.skipBlankTab(); p.err != nil { // This is the only place that we expect to see io.EOF, // which is not an error but the signal that we are done. @@ -158,6 +165,9 @@ func (p *TextParser) startOfLine() stateFn { return p.startComment case '\n': return p.startOfLine // Empty line, start the next one. + case '{': + p.currentMetricIsInsideBraces = true + return p.readingLabels } return p.readingMetricName } @@ -275,6 +285,8 @@ func (p *TextParser) startLabelName() stateFn { return nil // Unexpected end of input. 
} if p.currentByte == '}' { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } @@ -287,6 +299,45 @@ func (p *TextParser) startLabelName() stateFn { p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) return nil } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '=' { + if p.currentMetricIsInsideBraces { + if p.currentMetricInsideBracesIsPresent { + p.parseError(fmt.Sprintf("multiple metric names for metric %q", p.currentMF.GetName())) + return nil + } + switch p.currentByte { + case ',': + p.setOrCreateCurrentMF() + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + p.currentMetricInsideBracesIsPresent = true + return p.startLabelName + case '}': + p.setOrCreateCurrentMF() + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte)) + return nil + } + } + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + p.currentLabelPairs = nil + return nil + } p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) @@ -296,23 +347,17 @@ func (p *TextParser) startLabelName() stateFn { // labels to 'real' labels. if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) - } - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '=' { - p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) - return nil + p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) } // Check for duplicate label names. labels := make(map[string]struct{}) - for _, l := range p.currentMetric.Label { + for _, l := range p.currentLabelPairs { lName := l.GetName() if _, exists := labels[lName]; !exists { labels[lName] = struct{}{} } else { p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) + p.currentLabelPairs = nil return nil } } @@ -345,6 +390,7 @@ func (p *TextParser) startLabelValue() stateFn { if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + p.currentLabelPairs = nil return nil } } else { @@ -371,12 +417,19 @@ func (p *TextParser) startLabelValue() stateFn { return p.startLabelName case '}': + if p.currentMF == nil { + p.parseError("invalid metric name") + return nil + } + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) 
+ p.currentLabelPairs = nil if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } return p.readingValue default: p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) + p.currentLabelPairs = nil return nil } } @@ -585,6 +638,8 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { p.currentToken.WriteByte(p.currentByte) case 'n': p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) return @@ -610,13 +665,45 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { // but not into p.currentToken. func (p *TextParser) readTokenAsMetricName() { p.currentToken.Reset() + // A UTF-8 metric name must be quoted and may have escaped characters. + quoted := false + escaped := false if !isValidMetricNameStart(p.currentByte) { return } - for { - p.currentToken.WriteByte(p.currentByte) + for p.err == nil { + if escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '"': + quoted = !quoted + if !quoted { + p.currentByte, p.err = p.buf.ReadByte() + return + } + case '\n': + p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') { return } } @@ -628,13 +715,45 @@ func (p *TextParser) readTokenAsMetricName() { // but not into p.currentToken. func (p *TextParser) readTokenAsLabelName() { p.currentToken.Reset() + // A UTF-8 label name must be quoted and may have escaped characters. 
+ quoted := false + escaped := false if !isValidLabelNameStart(p.currentByte) { return } - for { - p.currentToken.WriteByte(p.currentByte) + for p.err == nil { + if escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '"': + quoted = !quoted + if !quoted { + p.currentByte, p.err = p.buf.ReadByte() + return + } + case '\n': + p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') { return } } @@ -660,6 +779,7 @@ func (p *TextParser) readTokenAsLabelValue() { p.currentToken.WriteByte('\n') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + p.currentLabelPairs = nil return } escaped = false @@ -718,19 +838,19 @@ func (p *TextParser) setOrCreateCurrentMF() { } func isValidLabelNameStart(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"' } -func isValidLabelNameContinuation(b byte) bool { - return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +func isValidLabelNameContinuation(b byte, quoted bool) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b))) } func isValidMetricNameStart(b byte) bool { return isValidLabelNameStart(b) || b == ':' } -func isValidMetricNameContinuation(b byte) bool { - return isValidLabelNameContinuation(b) || b == ':' +func isValidMetricNameContinuation(b byte, quoted bool) bool { + return isValidLabelNameContinuation(b, quoted) || b == ':' } func isBlankOrTab(b byte) bool { @@ -775,7 +895,7 @@ func histogramMetricName(name string) string { func parseFloat(s string) (float64, error) { if strings.ContainsAny(s, "pP_") { - return 0, fmt.Errorf("unsupported character in float") + return 0, errors.New("unsupported character in float") } return strconv.ParseFloat(s, 64) } diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go index 80d1fe944ea..bd3a39e3e14 100644 --- a/vendor/github.com/prometheus/common/model/alert.go +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -14,6 +14,7 @@ package model import ( + "errors" "fmt" "time" ) @@ -89,16 +90,16 @@ func (a *Alert) StatusAt(ts time.Time) AlertStatus { // Validate checks whether the alert data is inconsistent. 
func (a *Alert) Validate() error { if a.StartsAt.IsZero() { - return fmt.Errorf("start time missing") + return errors.New("start time missing") } if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { - return fmt.Errorf("start time must be before end time") + return errors.New("start time must be before end time") } if err := a.Labels.Validate(); err != nil { return fmt.Errorf("invalid label set: %w", err) } if len(a.Labels) == 0 { - return fmt.Errorf("at least one label pair required") + return errors.New("at least one label pair required") } if err := a.Annotations.Validate(); err != nil { return fmt.Errorf("invalid annotations: %w", err) diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index 3317ce22ff7..73b7aa3e60b 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -97,26 +97,35 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") // therewith. type LabelName string -// IsValid returns true iff name matches the pattern of LabelNameRE for legacy -// names, and iff it's valid UTF-8 if NameValidationScheme is set to -// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the -// check but a much faster hardcoded implementation. +// IsValid returns true iff the name matches the pattern of LabelNameRE when +// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if +// NameValidationScheme is set to UTF8Validation. func (ln LabelName) IsValid() bool { if len(ln) == 0 { return false } switch NameValidationScheme { case LegacyValidation: - for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } + return ln.IsValidLegacy() case UTF8Validation: return utf8.ValidString(string(ln)) default: panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) } +} + +// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for +// legacy names. It does not use LabelNameRE for the check but a much faster +// hardcoded implementation. +func (ln LabelName) IsValidLegacy() bool { + if len(ln) == 0 { + return false + } + for i, b := range ln { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } return true } diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go index 481c47b46e5..abb2c900183 100644 --- a/vendor/github.com/prometheus/common/model/labelset_string.go +++ b/vendor/github.com/prometheus/common/model/labelset_string.go @@ -11,8 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build go1.21 - package model import ( diff --git a/vendor/github.com/prometheus/common/model/labelset_string_go120.go b/vendor/github.com/prometheus/common/model/labelset_string_go120.go deleted file mode 100644 index c4212685e71..00000000000 --- a/vendor/github.com/prometheus/common/model/labelset_string_go120.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.21 - -package model - -import ( - "fmt" - "sort" - "strings" -) - -// String was optimized using functions not available for go 1.20 -// or lower. We keep the old implementation for compatibility with client_golang. -// Once client golang drops support for go 1.20 (scheduled for August 2024), this -// file can be removed. -func (l LabelSet) String() string { - labelNames := make([]string, 0, len(l)) - for name := range l { - labelNames = append(labelNames, string(name)) - } - sort.Strings(labelNames) - lstrs := make([]string, 0, len(l)) - for _, name := range labelNames { - lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)])) - } - return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) -} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index eb865e5a59c..5766107cf95 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -14,9 +14,11 @@ package model import ( + "errors" "fmt" "regexp" "sort" + "strconv" "strings" "unicode/utf8" @@ -26,18 +28,21 @@ import ( var ( // NameValidationScheme determines the method of name validation to be used by - // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 mode - // in isolation from other components that don't support UTF-8 may result in - // bugs or other undefined behavior. This value is intended to be set by - // UTF-8-aware binaries as part of their startup. To avoid need for locking, - // this value should be set once, ideally in an init(), before multiple - // goroutines are started. - NameValidationScheme = LegacyValidation - - // NameEscapingScheme defines the default way that names will be - // escaped when presented to systems that do not support UTF-8 names. If the - // Content-Type "escaping" term is specified, that will override this value. - NameEscapingScheme = ValueEncodingEscaping + // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 + // mode in isolation from other components that don't support UTF-8 may result + // in bugs or other undefined behavior. This value can be set to + // LegacyValidation during startup if a binary is not UTF-8-aware. To + // avoid the need for locking, this value should be set once, ideally in an + // init(), before multiple goroutines are started. + NameValidationScheme = UTF8Validation + + // NameEscapingScheme defines the default way that names will be escaped when + // presented to systems that do not support UTF-8 names. If the Content-Type + // "escaping" term is specified, that will override this value. + // NameEscapingScheme should not be set to the NoEscaping value. That string + // is used in content negotiation to indicate that a system supports UTF-8 and + // has that feature enabled.
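The two new defaults above invert the pre-1.x behaviour: names are now validated as UTF-8 and escaped with underscores unless a binary opts out. As a minimal sketch of that opt-out (the package main wrapper is illustrative; the identifiers are the ones shown in this hunk):

    package main

    import (
    	"fmt"

    	"github.com/prometheus/common/model"
    )

    func init() {
    	// A binary that is not yet UTF-8-aware sets this once, before any
    	// goroutines start, exactly as the comment above advises.
    	model.NameValidationScheme = model.LegacyValidation
    }

    func main() {
    	// IsValidLegacyMetricName now takes a plain string, which is why the
    	// model.LabelValue(...) conversions disappear throughout this patch.
    	fmt.Println(model.IsValidLegacyMetricName("http_requests_total")) // true
    	fmt.Println(model.IsValidLegacyMetricName("http.requests"))       // false
    }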
+ NameEscapingScheme = UnderscoreEscaping ) // ValidationScheme is a Go enum for determining how metric and label names will @@ -161,7 +166,7 @@ func (m Metric) FastFingerprint() Fingerprint { func IsValidMetricName(n LabelValue) bool { switch NameValidationScheme { case LegacyValidation: - return IsValidLegacyMetricName(n) + return IsValidLegacyMetricName(string(n)) case UTF8Validation: if len(n) == 0 { return false @@ -176,7 +181,7 @@ func IsValidMetricName(n LabelValue) bool { // legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. -func IsValidLegacyMetricName(n LabelValue) bool { +func IsValidLegacyMetricName(n string) bool { if len(n) == 0 { return false } @@ -208,7 +213,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF } // If the name is nil, copy as-is, don't try to escape. - if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) { + if v.Name == nil || IsValidLegacyMetricName(v.GetName()) { out.Name = v.Name } else { out.Name = proto.String(EscapeName(v.GetName(), scheme)) @@ -230,7 +235,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF for _, l := range m.Label { if l.GetName() == MetricNameLabel { - if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) { + if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) { escaped.Label = append(escaped.Label, l) continue } @@ -240,7 +245,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF }) continue } - if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) { + if l.Name == nil || IsValidLegacyMetricName(l.GetName()) { escaped.Label = append(escaped.Label, l) continue } @@ -256,20 +261,16 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF func metricNeedsEscaping(m *dto.Metric) bool { for _, l := range m.Label { - if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) { + if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) { return true } - if !IsValidLegacyMetricName(LabelValue(l.GetName())) { + if !IsValidLegacyMetricName(l.GetName()) { return true } } return false } -const ( - lowerhex = "0123456789abcdef" -) - // EscapeName escapes the incoming name according to the provided escaping // scheme. Depending on the rules of escaping, this may cause no change in the // string that is returned. 
(Especially NoEscaping, which by definition is a @@ -283,7 +284,7 @@ func EscapeName(name string, scheme EscapingScheme) string { case NoEscaping: return name case UnderscoreEscaping: - if IsValidLegacyMetricName(LabelValue(name)) { + if IsValidLegacyMetricName(name) { return name } for i, b := range name { @@ -304,31 +305,25 @@ func EscapeName(name string, scheme EscapingScheme) string { } else if isValidLegacyRune(b, i) { escaped.WriteRune(b) } else { - escaped.WriteRune('_') + escaped.WriteString("__") } } return escaped.String() case ValueEncodingEscaping: - if IsValidLegacyMetricName(LabelValue(name)) { + if IsValidLegacyMetricName(name) { return name } escaped.WriteString("U__") for i, b := range name { - if isValidLegacyRune(b, i) { + if b == '_' { + escaped.WriteString("__") + } else if isValidLegacyRune(b, i) { escaped.WriteRune(b) } else if !utf8.ValidRune(b) { escaped.WriteString("_FFFD_") - } else if b < 0x100 { - escaped.WriteRune('_') - for s := 4; s >= 0; s -= 4 { - escaped.WriteByte(lowerhex[b>>uint(s)&0xF]) - } - escaped.WriteRune('_') - } else if b < 0x10000 { + } else { escaped.WriteRune('_') - for s := 12; s >= 0; s -= 4 { - escaped.WriteByte(lowerhex[b>>uint(s)&0xF]) - } + escaped.WriteString(strconv.FormatInt(int64(b), 16)) escaped.WriteRune('_') } } @@ -386,8 +381,9 @@ func UnescapeName(name string, scheme EscapingScheme) string { // We think we are in a UTF-8 code, process it. var utf8Val uint for j := 0; i < len(escapedName); j++ { - // This is too many characters for a utf8 value. - if j > 4 { + // This is too many characters for a utf8 value based on the MaxRune + // value of '\U0010FFFF'. + if j >= 6 { return name } // Found a closing underscore, convert to a rune, check validity, and append. @@ -440,7 +436,7 @@ func (e EscapingScheme) String() string { func ToEscapingScheme(s string) (EscapingScheme, error) { if s == "" { - return NoEscaping, fmt.Errorf("got empty string instead of escaping scheme") + return NoEscaping, errors.New("got empty string instead of escaping scheme") } switch s { case AllowUTF8: @@ -452,6 +448,6 @@ func ToEscapingScheme(s string) (EscapingScheme, error) { case EscapeValues: return ValueEncodingEscaping, nil default: - return NoEscaping, fmt.Errorf("unknown format scheme " + s) + return NoEscaping, fmt.Errorf("unknown format scheme %s", s) } } diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go index 910b0b71fcc..8f91a9702e0 100644 --- a/vendor/github.com/prometheus/common/model/silence.go +++ b/vendor/github.com/prometheus/common/model/silence.go @@ -15,6 +15,7 @@ package model import ( "encoding/json" + "errors" "fmt" "regexp" "time" @@ -34,7 +35,7 @@ func (m *Matcher) UnmarshalJSON(b []byte) error { } if len(m.Name) == 0 { - return fmt.Errorf("label name in matcher must not be empty") + return errors.New("label name in matcher must not be empty") } if m.IsRegex { if _, err := regexp.Compile(m.Value); err != nil { @@ -77,7 +78,7 @@ type Silence struct { // Validate returns true iff all fields of the silence have valid values. 
func (s *Silence) Validate() error { if len(s.Matchers) == 0 { - return fmt.Errorf("at least one matcher required") + return errors.New("at least one matcher required") } for _, m := range s.Matchers { if err := m.Validate(); err != nil { @@ -85,22 +86,22 @@ func (s *Silence) Validate() error { } } if s.StartsAt.IsZero() { - return fmt.Errorf("start time missing") + return errors.New("start time missing") } if s.EndsAt.IsZero() { - return fmt.Errorf("end time missing") + return errors.New("end time missing") } if s.EndsAt.Before(s.StartsAt) { - return fmt.Errorf("start time must be before end time") + return errors.New("start time must be before end time") } if s.CreatedBy == "" { - return fmt.Errorf("creator information missing") + return errors.New("creator information missing") } if s.Comment == "" { - return fmt.Errorf("comment missing") + return errors.New("comment missing") } if s.CreatedAt.IsZero() { - return fmt.Errorf("creation timestamp missing") + return errors.New("creation timestamp missing") } return nil } diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go index ae35cc2ab4b..6bfc757d18b 100644 --- a/vendor/github.com/prometheus/common/model/value_float.go +++ b/vendor/github.com/prometheus/common/model/value_float.go @@ -15,6 +15,7 @@ package model import ( "encoding/json" + "errors" "fmt" "math" "strconv" @@ -39,7 +40,7 @@ func (v SampleValue) MarshalJSON() ([]byte, error) { // UnmarshalJSON implements json.Unmarshaler. func (v *SampleValue) UnmarshalJSON(b []byte) error { if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("sample value must be a quoted string") + return errors.New("sample value must be a quoted string") } f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) if err != nil { diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go index 54bb038cfff..895e6a3e839 100644 --- a/vendor/github.com/prometheus/common/model/value_histogram.go +++ b/vendor/github.com/prometheus/common/model/value_histogram.go @@ -15,6 +15,7 @@ package model import ( "encoding/json" + "errors" "fmt" "strconv" "strings" @@ -32,7 +33,7 @@ func (v FloatString) MarshalJSON() ([]byte, error) { func (v *FloatString) UnmarshalJSON(b []byte) error { if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("float value must be a quoted string") + return errors.New("float value must be a quoted string") } f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) if err != nil { @@ -141,7 +142,7 @@ type SampleHistogramPair struct { func (s SampleHistogramPair) MarshalJSON() ([]byte, error) { if s.Histogram == nil { - return nil, fmt.Errorf("histogram is nil") + return nil, errors.New("histogram is nil") } t, err := json.Marshal(s.Timestamp) if err != nil { @@ -164,7 +165,7 @@ func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error { return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) } if s.Histogram == nil { - return fmt.Errorf("histogram is null") + return errors.New("histogram is null") } return nil } diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/spf13/afero/.editorconfig similarity index 69% rename from vendor/github.com/fsnotify/fsnotify/.editorconfig rename to vendor/github.com/spf13/afero/.editorconfig index fad895851e5..4492e9f9fe1 100644 --- a/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ 
b/vendor/github.com/spf13/afero/.editorconfig @@ -1,12 +1,12 @@ root = true -[*.go] -indent_style = tab +[*] +charset = utf-8 +end_of_line = lf indent_size = 4 -insert_final_newline = true - -[*.{yml,yaml}] indent_style = space -indent_size = 2 insert_final_newline = true trim_trailing_whitespace = true + +[*.go] +indent_style = tab diff --git a/vendor/github.com/spf13/afero/.golangci.yaml b/vendor/github.com/spf13/afero/.golangci.yaml new file mode 100644 index 00000000000..806289a2507 --- /dev/null +++ b/vendor/github.com/spf13/afero/.golangci.yaml @@ -0,0 +1,18 @@ +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/spf13/afero) + +linters: + disable-all: true + enable: + - gci + - gofmt + - gofumpt + - staticcheck + +issues: + exclude-dirs: + - gcsfs/internal/stiface diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md index 3bafbfdfcaf..619af574f38 100644 --- a/vendor/github.com/spf13/afero/README.md +++ b/vendor/github.com/spf13/afero/README.md @@ -12,7 +12,7 @@ types and methods. Afero has an exceptionally clean interface and simple design without needless constructors or initialization methods. Afero is also a library providing a base set of interoperable backend -filesystems that make it easy to work with afero while retaining all the power +filesystems that make it easy to work with, while retaining all the power and benefit of the os and ioutil packages. Afero provides significant improvements over using the os package alone, most diff --git a/vendor/github.com/spf13/afero/iofs.go b/vendor/github.com/spf13/afero/iofs.go index 938b9316e6b..b13155ca4a9 100644 --- a/vendor/github.com/spf13/afero/iofs.go +++ b/vendor/github.com/spf13/afero/iofs.go @@ -255,7 +255,6 @@ func (f fromIOFSFile) Readdir(count int) ([]os.FileInfo, error) { ret := make([]os.FileInfo, len(entries)) for i := range entries { ret[i], err = entries[i].Info() - if err != nil { return nil, err } diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go index d6c744e8d56..ed92f5649da 100644 --- a/vendor/github.com/spf13/afero/memmap.go +++ b/vendor/github.com/spf13/afero/memmap.go @@ -16,11 +16,9 @@ package afero import ( "fmt" "io" - "log" "os" "path/filepath" - "sort" "strings" "sync" diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index 6444f4b7f6f..71757151c33 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -1,4 +1,5 @@ -![cobra logo](assets/CobraMain.png) + +![cobra logo](https://github.com/user-attachments/assets/cbc3adf8-0dff-46e9-a88d-5e2d971c169e) Cobra is a library for creating powerful modern CLI applications. @@ -105,7 +106,7 @@ go install github.com/spf13/cobra-cli@latest For complete details on using the Cobra-CLI generator, please read [The Cobra Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md) -For complete details on using the Cobra library, please read the [The Cobra User Guide](site/content/user_guide.md). +For complete details on using the Cobra library, please read [The Cobra User Guide](site/content/user_guide.md). 
# License diff --git a/vendor/github.com/spf13/cobra/active_help.go b/vendor/github.com/spf13/cobra/active_help.go index 25c30e3ccc3..b3e2dadfed6 100644 --- a/vendor/github.com/spf13/cobra/active_help.go +++ b/vendor/github.com/spf13/cobra/active_help.go @@ -35,7 +35,7 @@ const ( // This function can be called multiple times before and/or after completions are added to // the array. Each time this function is called with the same array, the new // ActiveHelp line will be shown below the previous ones when completion is triggered. -func AppendActiveHelp(compArray []string, activeHelpStr string) []string { +func AppendActiveHelp(compArray []Completion, activeHelpStr string) []Completion { return append(compArray, fmt.Sprintf("%s%s", activeHelpMarker, activeHelpStr)) } diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go index 1cce5c329c2..d2397aa3668 100644 --- a/vendor/github.com/spf13/cobra/bash_completionsV2.go +++ b/vendor/github.com/spf13/cobra/bash_completionsV2.go @@ -146,7 +146,7 @@ __%[1]s_process_completion_results() { if (((directive & shellCompDirectiveFilterFileExt) != 0)); then # File extension filtering - local fullFilter filter filteringCmd + local fullFilter="" filter filteringCmd # Do not use quotes around the $completions variable or else newline # characters will be kept. @@ -177,20 +177,71 @@ __%[1]s_process_completion_results() { __%[1]s_handle_special_char "$cur" = # Print the activeHelp statements before we finish + __%[1]s_handle_activeHelp +} + +__%[1]s_handle_activeHelp() { + # Print the activeHelp statements if ((${#activeHelp[*]} != 0)); then - printf "\n"; - printf "%%s\n" "${activeHelp[@]}" - printf "\n" - - # The prompt format is only available from bash 4.4. - # We test if it is available before using it. - if (x=${PS1@P}) 2> /dev/null; then - printf "%%s" "${PS1@P}${COMP_LINE[@]}" - else - # Can't print the prompt. Just print the - # text the user had typed, it is workable enough. - printf "%%s" "${COMP_LINE[@]}" + if [ -z $COMP_TYPE ]; then + # Bash v3 does not set the COMP_TYPE variable. + printf "\n"; + printf "%%s\n" "${activeHelp[@]}" + printf "\n" + __%[1]s_reprint_commandLine + return fi + + # Only print ActiveHelp on the second TAB press + if [ $COMP_TYPE -eq 63 ]; then + printf "\n" + printf "%%s\n" "${activeHelp[@]}" + + if ((${#COMPREPLY[*]} == 0)); then + # When there are no completion choices from the program, file completion + # may kick in if the program has not disabled it; in such a case, we want + # to know if any files will match what the user typed, so that we know if + # there will be completions presented, so that we know how to handle ActiveHelp. + # To find out, we actually trigger the file completion ourselves; + # the call to _filedir will fill COMPREPLY if files match. + if (((directive & shellCompDirectiveNoFileComp) == 0)); then + __%[1]s_debug "Listing files" + _filedir + fi + fi + + if ((${#COMPREPLY[*]} != 0)); then + # If there are completion choices to be shown, print a delimiter. + # Re-printing the command-line will automatically be done + # by the shell when it prints the completion choices. + printf -- "--" + else + # When there are no completion choices at all, we need + # to re-print the command-line since the shell will + # not be doing it itself. 
+ __%[1]s_reprint_commandLine + fi + elif [ $COMP_TYPE -eq 37 ] || [ $COMP_TYPE -eq 42 ]; then + # For completion type: menu-complete/menu-complete-backward and insert-completions + # the completions are immediately inserted into the command-line, so we first + # print the activeHelp message and reprint the command-line since the shell won't. + printf "\n" + printf "%%s\n" "${activeHelp[@]}" + + __%[1]s_reprint_commandLine + fi + fi +} + +__%[1]s_reprint_commandLine() { + # The prompt format is only available from bash 4.4. + # We test if it is available before using it. + if (x=${PS1@P}) 2> /dev/null; then + printf "%%s" "${PS1@P}${COMP_LINE[@]}" + else + # Can't print the prompt. Just print the + # text the user had typed, it is workable enough. + printf "%%s" "${COMP_LINE[@]}" fi } @@ -201,6 +252,8 @@ __%[1]s_extract_activeHelp() { local endIndex=${#activeHelpMarker} while IFS='' read -r comp; do + [[ -z $comp ]] && continue + if [[ ${comp:0:endIndex} == $activeHelpMarker ]]; then comp=${comp:endIndex} __%[1]s_debug "ActiveHelp found: $comp" @@ -223,16 +276,21 @@ __%[1]s_handle_completion_types() { # If the user requested inserting one completion at a time, or all # completions at once on the command-line we must remove the descriptions. # https://github.com/spf13/cobra/issues/1508 - local tab=$'\t' comp - while IFS='' read -r comp; do - [[ -z $comp ]] && continue - # Strip any description - comp=${comp%%%%$tab*} - # Only consider the completions that match - if [[ $comp == "$cur"* ]]; then - COMPREPLY+=("$comp") - fi - done < <(printf "%%s\n" "${completions[@]}") + + # If there are no completions, we don't need to do anything + (( ${#completions[@]} == 0 )) && return 0 + + local tab=$'\t' + + # Strip any description and escape the completion to handle special characters + IFS=$'\n' read -ra completions -d '' < <(printf "%%q\n" "${completions[@]%%%%$tab*}") + + # Only consider the completions that match + IFS=$'\n' read -ra COMPREPLY -d '' < <(IFS=$'\n'; compgen -W "${completions[*]}" -- "${cur}") + + # compgen loses the escaping so we need to escape all completions again since they will + # all be inserted on the command-line. + IFS=$'\n' read -ra COMPREPLY -d '' < <(printf "%%q\n" "${COMPREPLY[@]}") ;; *) @@ -243,11 +301,25 @@ __%[1]s_handle_completion_types() { } __%[1]s_handle_standard_completion_case() { - local tab=$'\t' comp + local tab=$'\t' + + # If there are no completions, we don't need to do anything + (( ${#completions[@]} == 0 )) && return 0 # Short circuit to optimize if we don't have descriptions if [[ "${completions[*]}" != *$tab* ]]; then - IFS=$'\n' read -ra COMPREPLY -d '' < <(compgen -W "${completions[*]}" -- "$cur") + # First, escape the completions to handle special characters + IFS=$'\n' read -ra completions -d '' < <(printf "%%q\n" "${completions[@]}") + # Only consider the completions that match what the user typed + IFS=$'\n' read -ra COMPREPLY -d '' < <(IFS=$'\n'; compgen -W "${completions[*]}" -- "${cur}") + + # compgen loses the escaping so, if there is only a single completion, we need to + # escape it again because it will be inserted on the command-line. If there are multiple + # completions, we don't want to escape them because they will be printed in a list + # and we don't want to show escape characters in that list.
+ if (( ${#COMPREPLY[@]} == 1 )); then + COMPREPLY[0]=$(printf "%%q" "${COMPREPLY[0]}") + fi return 0 fi @@ -256,23 +328,39 @@ __%[1]s_handle_standard_completion_case() { # Look for the longest completion so that we can format things nicely while IFS='' read -r compline; do [[ -z $compline ]] && continue - # Strip any description before checking the length - comp=${compline%%%%$tab*} + + # Before checking if the completion matches what the user typed, + # we need to strip any description and escape the completion to handle special + # characters because those escape characters are part of what the user typed. + # Don't call "printf" in a sub-shell because it will be much slower + # since we are in a loop. + printf -v comp "%%q" "${compline%%%%$tab*}" &>/dev/null || comp=$(printf "%%q" "${compline%%%%$tab*}") + # Only consider the completions that match [[ $comp == "$cur"* ]] || continue + + # The completion matches. Add it to the list of full completions including + # its description. We don't escape the completion because it may get printed + # in a list if there is more than one, and we don't want to show escape characters + # in that list. COMPREPLY+=("$compline") + + # Strip any description before checking the length, and again, don't escape + # the completion because this length is only used when printing the completions + # in a list and we don't want to show escape characters in that list. + comp=${compline%%%%$tab*} if ((${#comp}>longest)); then longest=${#comp} fi done < <(printf "%%s\n" "${completions[@]}") - # If there is a single completion left, remove the description text + # If there is a single completion left, remove the description text and escape any special characters if ((${#COMPREPLY[*]} == 1)); then __%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}" - comp="${COMPREPLY[0]%%%%$tab*}" - __%[1]s_debug "Removed description from single completion, which is now: ${comp}" - COMPREPLY[0]=$comp - else # Format the descriptions + COMPREPLY[0]=$(printf "%%q" "${COMPREPLY[0]%%%%$tab*}") + __%[1]s_debug "Removed description from single completion, which is now: ${COMPREPLY[0]}" + else + # Format the descriptions __%[1]s_format_comp_descriptions $longest fi } diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go index e0b0947b04c..d9cd2414e23 100644 --- a/vendor/github.com/spf13/cobra/cobra.go +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -176,12 +176,16 @@ func rpad(s string, padding int) string { return fmt.Sprintf(formattedString, s) } -// tmpl executes the given template text on data, writing the result to w. -func tmpl(w io.Writer, text string, data interface{}) error { - t := template.New("top") - t.Funcs(templateFuncs) - template.Must(t.Parse(text)) - return t.Execute(w, data) +func tmpl(text string) *tmplFunc { + return &tmplFunc{ + tmpl: text, + fn: func(w io.Writer, data interface{}) error { + t := template.New("top") + t.Funcs(templateFuncs) + template.Must(t.Parse(text)) + return t.Execute(w, data) + }, + } } // ld compares two strings and returns the levenshtein distance between them.
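With this rewrite, tmpl no longer executes a template immediately; it returns a *tmplFunc pairing the raw template text with a closure that parses and executes it on demand, so callers that only need the text never pay for parsing. A brief sketch of the new shape, assuming code inside package cobra (tmpl and tmplFunc are unexported) and an illustrative template:

    greet := tmpl("Hello {{.Name}}!\n")
    fmt.Print(greet.tmpl)                                   // the raw text, still served by the *Template() getters
    _ = greet.fn(os.Stdout, struct{ Name string }{"cobra"}) // parses and executes only when rendered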
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 54748fc67eb..dbb2c298ba0 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -33,6 +33,9 @@ import ( const ( FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra" CommandDisplayNameAnnotation = "cobra_annotation_command_display_name" + + helpFlagName = "help" + helpCommandName = "help" ) // FParseErrWhitelist configures Flag parse errors to be ignored @@ -80,11 +83,11 @@ type Command struct { Example string // ValidArgs is list of all valid non-flag arguments that are accepted in shell completions - ValidArgs []string + ValidArgs []Completion // ValidArgsFunction is an optional function that provides valid non-flag arguments for shell completion. // It is a dynamic version of using ValidArgs. // Only one of ValidArgs and ValidArgsFunction can be used for a command. - ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) + ValidArgsFunction CompletionFunc // Expected arguments Args PositionalArgs @@ -168,12 +171,12 @@ type Command struct { // usageFunc is usage func defined by user. usageFunc func(*Command) error // usageTemplate is usage template defined by user. - usageTemplate string + usageTemplate *tmplFunc // flagErrorFunc is func defined by user and it's called when the parsing of // flags returns an error. flagErrorFunc func(*Command, error) error // helpTemplate is help template defined by user. - helpTemplate string + helpTemplate *tmplFunc // helpFunc is help func defined by user. helpFunc func(*Command, []string) // helpCommand is command with usage 'help'. If it's not defined by user, @@ -186,7 +189,7 @@ type Command struct { completionCommandGroupID string // versionTemplate is the version template defined by user. - versionTemplate string + versionTemplate *tmplFunc // errPrefix is the error message prefix defined by user. errPrefix string @@ -281,6 +284,7 @@ func (c *Command) SetArgs(a []string) { // SetOutput sets the destination for usage and error messages. // If output is nil, os.Stderr is used. +// // Deprecated: Use SetOut and/or SetErr instead func (c *Command) SetOutput(output io.Writer) { c.outWriter = output @@ -312,7 +316,11 @@ func (c *Command) SetUsageFunc(f func(*Command) error) { // SetUsageTemplate sets usage template. Can be defined by Application. func (c *Command) SetUsageTemplate(s string) { - c.usageTemplate = s + if s == "" { + c.usageTemplate = nil + return + } + c.usageTemplate = tmpl(s) } // SetFlagErrorFunc sets a function to generate an error when flag parsing @@ -348,12 +356,20 @@ func (c *Command) SetCompletionCommandGroupID(groupID string) { // SetHelpTemplate sets help template to be used. Application can use it to set custom template. func (c *Command) SetHelpTemplate(s string) { - c.helpTemplate = s + if s == "" { + c.helpTemplate = nil + return + } + c.helpTemplate = tmpl(s) } // SetVersionTemplate sets version template to be used. Application can use it to set custom template. func (c *Command) SetVersionTemplate(s string) { - c.versionTemplate = s + if s == "" { + c.versionTemplate = nil + return + } + c.versionTemplate = tmpl(s) } // SetErrPrefix sets error message prefix to be used. Application can use it to set custom prefix. 
@@ -434,7 +450,8 @@ func (c *Command) UsageFunc() (f func(*Command) error) { } return func(c *Command) error { c.mergePersistentFlags() - err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c) + fn := c.getUsageTemplateFunc() + err := fn(c.OutOrStderr(), c) if err != nil { c.PrintErrln(err) } @@ -442,6 +459,19 @@ func (c *Command) UsageFunc() (f func(*Command) error) { } } +// getUsageTemplateFunc returns the usage template function for the command +// going up the command tree if necessary. +func (c *Command) getUsageTemplateFunc() func(w io.Writer, data interface{}) error { + if c.usageTemplate != nil { + return c.usageTemplate.fn + } + + if c.HasParent() { + return c.parent.getUsageTemplateFunc() + } + return defaultUsageFunc +} + // Usage puts out the usage for the command. // Used when a user provides invalid input. // Can be defined by user by overriding UsageFunc. @@ -460,15 +490,30 @@ func (c *Command) HelpFunc() func(*Command, []string) { } return func(c *Command, a []string) { c.mergePersistentFlags() + fn := c.getHelpTemplateFunc() // The help should be sent to stdout // See https://github.com/spf13/cobra/issues/1002 - err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c) + err := fn(c.OutOrStdout(), c) if err != nil { c.PrintErrln(err) } } } +// getHelpTemplateFunc returns the help template function for the command +// going up the command tree if necessary. +func (c *Command) getHelpTemplateFunc() func(w io.Writer, data interface{}) error { + if c.helpTemplate != nil { + return c.helpTemplate.fn + } + + if c.HasParent() { + return c.parent.getHelpTemplateFunc() + } + + return defaultHelpFunc +} + // Help puts out the help for the command. // Used when a user calls help [command]. // Can be defined by user by overriding HelpFunc. @@ -543,71 +588,55 @@ func (c *Command) NamePadding() int { } // UsageTemplate returns usage template for the command. +// This function is kept for backwards-compatibility reasons. 
func (c *Command) UsageTemplate() string { - if c.usageTemplate != "" { - return c.usageTemplate + if c.usageTemplate != nil { + return c.usageTemplate.tmpl } if c.HasParent() { return c.parent.UsageTemplate() } - return `Usage:{{if .Runnable}} - {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} - {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} - -Aliases: - {{.NameAndAliases}}{{end}}{{if .HasExample}} - -Examples: -{{.Example}}{{end}}{{if .HasAvailableSubCommands}}{{$cmds := .Commands}}{{if eq (len .Groups) 0}} - -Available Commands:{{range $cmds}}{{if (or .IsAvailableCommand (eq .Name "help"))}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{else}}{{range $group := .Groups}} - -{{.Title}}{{range $cmds}}{{if (and (eq .GroupID $group.ID) (or .IsAvailableCommand (eq .Name "help")))}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if not .AllChildCommandsHaveGroup}} - -Additional Commands:{{range $cmds}}{{if (and (eq .GroupID "") (or .IsAvailableCommand (eq .Name "help")))}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} - -Flags: -{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}} - -Global Flags: -{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}} - -Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}} - {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}} - -Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}} -` + return defaultUsageTemplate } // HelpTemplate return help template for the command. +// This function is kept for backwards-compatibility reasons. func (c *Command) HelpTemplate() string { - if c.helpTemplate != "" { - return c.helpTemplate + if c.helpTemplate != nil { + return c.helpTemplate.tmpl } if c.HasParent() { return c.parent.HelpTemplate() } - return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}} - -{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` + return defaultHelpTemplate } // VersionTemplate return version template for the command. +// This function is kept for backwards-compatibility reasons. func (c *Command) VersionTemplate() string { - if c.versionTemplate != "" { - return c.versionTemplate + if c.versionTemplate != nil { + return c.versionTemplate.tmpl } if c.HasParent() { return c.parent.VersionTemplate() } - return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} -` + return defaultVersionTemplate +} + +// getVersionTemplateFunc returns the version template function for the command +// going up the command tree if necessary. +func (c *Command) getVersionTemplateFunc() func(w io.Writer, data interface{}) error { + if c.versionTemplate != nil { + return c.versionTemplate.fn + } + + if c.HasParent() { + return c.parent.getVersionTemplateFunc() + } + return defaultVersionFunc } // ErrPrefix return error message prefix for the command @@ -894,7 +923,7 @@ func (c *Command) execute(a []string) (err error) { // If help is called, regardless of other flags, return we want help. // Also say we need help if the command isn't runnable. 
- helpVal, err := c.Flags().GetBool("help") + helpVal, err := c.Flags().GetBool(helpFlagName) if err != nil { // should be impossible to get here as we always declare a help // flag in InitDefaultHelpFlag() @@ -914,7 +943,8 @@ func (c *Command) execute(a []string) (err error) { return err } if versionVal { - err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c) + fn := c.getVersionTemplateFunc() + err := fn(c.OutOrStdout(), c) if err != nil { c.Println(err) } @@ -1068,12 +1098,6 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { // initialize help at the last point to allow for user overriding c.InitDefaultHelpCmd() - // initialize completion at the last point to allow for user overriding - c.InitDefaultCompletionCmd() - - // Now that all commands have been created, let's make sure all groups - // are properly created also - c.checkCommandGroups() args := c.args @@ -1082,9 +1106,16 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { args = os.Args[1:] } - // initialize the hidden command to be used for shell completion + // initialize the __complete command to be used for shell completion c.initCompleteCmd(args) + // initialize the default completion command + c.InitDefaultCompletionCmd(args...) + + // Now that all commands have been created, let's make sure all groups + // are properly created also + c.checkCommandGroups() + var flags []string if c.TraverseChildren { cmd, flags, err = c.Traverse(args) @@ -1187,16 +1218,16 @@ func (c *Command) checkCommandGroups() { // If c already has help flag, it will do nothing. func (c *Command) InitDefaultHelpFlag() { c.mergePersistentFlags() - if c.Flags().Lookup("help") == nil { + if c.Flags().Lookup(helpFlagName) == nil { usage := "help for " - name := c.displayName() + name := c.DisplayName() if name == "" { usage += "this command" } else { usage += name } - c.Flags().BoolP("help", "h", false, usage) - _ = c.Flags().SetAnnotation("help", FlagSetByCobraAnnotation, []string{"true"}) + c.Flags().BoolP(helpFlagName, "h", false, usage) + _ = c.Flags().SetAnnotation(helpFlagName, FlagSetByCobraAnnotation, []string{"true"}) } } @@ -1215,7 +1246,7 @@ func (c *Command) InitDefaultVersionFlag() { if c.Name() == "" { usage += "this command" } else { - usage += c.Name() + usage += c.DisplayName() } if c.Flags().ShorthandLookup("v") == nil { c.Flags().BoolP("version", "v", false, usage) @@ -1239,9 +1270,9 @@ func (c *Command) InitDefaultHelpCmd() { Use: "help [command]", Short: "Help about any command", Long: `Help provides help for any command in the application. 
-Simply type ` + c.displayName() + ` help [path to command] for full details.`, - ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]string, ShellCompDirective) { - var completions []string +Simply type ` + c.DisplayName() + ` help [path to command] for full details.`, + ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]Completion, ShellCompDirective) { + var completions []Completion cmd, _, e := c.Root().Find(args) if e != nil { return nil, ShellCompDirectiveNoFileComp @@ -1253,7 +1284,7 @@ Simply type ` + c.displayName() + ` help [path to command] for full details.`, for _, subCmd := range cmd.Commands() { if subCmd.IsAvailableCommand() || subCmd == cmd.helpCommand { if strings.HasPrefix(subCmd.Name(), toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) + completions = append(completions, CompletionWithDesc(subCmd.Name(), subCmd.Short)) } } } @@ -1430,10 +1461,12 @@ func (c *Command) CommandPath() string { if c.HasParent() { return c.Parent().CommandPath() + " " + c.Name() } - return c.displayName() + return c.DisplayName() } -func (c *Command) displayName() string { +// DisplayName returns the name to display in help text. It returns the command's +// Name() if CommandDisplayNameAnnotation is not set. +func (c *Command) DisplayName() string { if displayName, ok := c.Annotations[CommandDisplayNameAnnotation]; ok { return displayName } @@ -1443,7 +1476,7 @@ func (c *Command) displayName() string { // UseLine puts out the full usage for a given command (including parents). func (c *Command) UseLine() string { var useline string - use := strings.Replace(c.Use, c.Name(), c.displayName(), 1) + use := strings.Replace(c.Use, c.Name(), c.DisplayName(), 1) if c.HasParent() { useline = c.parent.CommandPath() + " " + use } else { @@ -1649,7 +1682,7 @@ func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) f // to this command (local and persistent declared here and by all parents). func (c *Command) Flags() *flag.FlagSet { if c.flags == nil { - c.flags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) + c.flags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1664,7 +1697,7 @@ func (c *Command) Flags() *flag.FlagSet { func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { persistentFlags := c.PersistentFlags() - out := flag.NewFlagSet(c.displayName(), flag.ContinueOnError) + out := flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) c.LocalFlags().VisitAll(func(f *flag.Flag) { if persistentFlags.Lookup(f.Name) == nil { out.AddFlag(f) @@ -1679,7 +1712,7 @@ func (c *Command) LocalFlags() *flag.FlagSet { c.mergePersistentFlags() if c.lflags == nil { - c.lflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) + c.lflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1707,7 +1740,7 @@ func (c *Command) InheritedFlags() *flag.FlagSet { c.mergePersistentFlags() if c.iflags == nil { - c.iflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) + c.iflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1736,7 +1769,7 @@ func (c *Command) NonInheritedFlags() *flag.FlagSet { // PersistentFlags returns the persistent FlagSet specifically set in the current command.
func (c *Command) PersistentFlags() *flag.FlagSet { if c.pflags == nil { - c.pflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) + c.pflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1749,9 +1782,9 @@ func (c *Command) PersistentFlags() *flag.FlagSet { func (c *Command) ResetFlags() { c.flagErrorBuf = new(bytes.Buffer) c.flagErrorBuf.Reset() - c.flags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) + c.flags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) c.flags.SetOutput(c.flagErrorBuf) - c.pflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) + c.pflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) c.pflags.SetOutput(c.flagErrorBuf) c.lflags = nil @@ -1868,7 +1901,7 @@ func (c *Command) mergePersistentFlags() { // If c.parentsPflags == nil, it makes new. func (c *Command) updateParentsPflags() { if c.parentsPflags == nil { - c.parentsPflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) + c.parentsPflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) c.parentsPflags.SetOutput(c.flagErrorBuf) c.parentsPflags.SortFlags = false } @@ -1894,3 +1927,141 @@ func commandNameMatches(s string, t string) bool { return s == t } + +// tmplFunc holds a template and a function that will execute said template. +type tmplFunc struct { + tmpl string + fn func(io.Writer, interface{}) error +} + +var defaultUsageTemplate = `Usage:{{if .Runnable}} + {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} + {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} + +Aliases: + {{.NameAndAliases}}{{end}}{{if .HasExample}} + +Examples: +{{.Example}}{{end}}{{if .HasAvailableSubCommands}}{{$cmds := .Commands}}{{if eq (len .Groups) 0}} + +Available Commands:{{range $cmds}}{{if (or .IsAvailableCommand (eq .Name "help"))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{else}}{{range $group := .Groups}} + +{{.Title}}{{range $cmds}}{{if (and (eq .GroupID $group.ID) (or .IsAvailableCommand (eq .Name "help")))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if not .AllChildCommandsHaveGroup}} + +Additional Commands:{{range $cmds}}{{if (and (eq .GroupID "") (or .IsAvailableCommand (eq .Name "help")))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} + +Flags: +{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}} + +Global Flags: +{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}} + +Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}} + {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}} + +Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}} +` + +// defaultUsageFunc is equivalent to executing defaultUsageTemplate. The two should be changed in sync. 
+func defaultUsageFunc(w io.Writer, in interface{}) error { + c := in.(*Command) + fmt.Fprint(w, "Usage:") + if c.Runnable() { + fmt.Fprintf(w, "\n %s", c.UseLine()) + } + if c.HasAvailableSubCommands() { + fmt.Fprintf(w, "\n %s [command]", c.CommandPath()) + } + if len(c.Aliases) > 0 { + fmt.Fprintf(w, "\n\nAliases:\n") + fmt.Fprintf(w, " %s", c.NameAndAliases()) + } + if c.HasExample() { + fmt.Fprintf(w, "\n\nExamples:\n") + fmt.Fprintf(w, "%s", c.Example) + } + if c.HasAvailableSubCommands() { + cmds := c.Commands() + if len(c.Groups()) == 0 { + fmt.Fprintf(w, "\n\nAvailable Commands:") + for _, subcmd := range cmds { + if subcmd.IsAvailableCommand() || subcmd.Name() == helpCommandName { + fmt.Fprintf(w, "\n %s %s", rpad(subcmd.Name(), subcmd.NamePadding()), subcmd.Short) + } + } + } else { + for _, group := range c.Groups() { + fmt.Fprintf(w, "\n\n%s", group.Title) + for _, subcmd := range cmds { + if subcmd.GroupID == group.ID && (subcmd.IsAvailableCommand() || subcmd.Name() == helpCommandName) { + fmt.Fprintf(w, "\n %s %s", rpad(subcmd.Name(), subcmd.NamePadding()), subcmd.Short) + } + } + } + if !c.AllChildCommandsHaveGroup() { + fmt.Fprintf(w, "\n\nAdditional Commands:") + for _, subcmd := range cmds { + if subcmd.GroupID == "" && (subcmd.IsAvailableCommand() || subcmd.Name() == helpCommandName) { + fmt.Fprintf(w, "\n %s %s", rpad(subcmd.Name(), subcmd.NamePadding()), subcmd.Short) + } + } + } + } + } + if c.HasAvailableLocalFlags() { + fmt.Fprintf(w, "\n\nFlags:\n") + fmt.Fprint(w, trimRightSpace(c.LocalFlags().FlagUsages())) + } + if c.HasAvailableInheritedFlags() { + fmt.Fprintf(w, "\n\nGlobal Flags:\n") + fmt.Fprint(w, trimRightSpace(c.InheritedFlags().FlagUsages())) + } + if c.HasHelpSubCommands() { + fmt.Fprintf(w, "\n\nAdditional help topics:") + for _, subcmd := range c.Commands() { + if subcmd.IsAdditionalHelpTopicCommand() { + fmt.Fprintf(w, "\n %s %s", rpad(subcmd.CommandPath(), subcmd.CommandPathPadding()), subcmd.Short) + } + } + } + if c.HasAvailableSubCommands() { + fmt.Fprintf(w, "\n\nUse \"%s [command] --help\" for more information about a command.", c.CommandPath()) + } + fmt.Fprintln(w) + return nil +} + +var defaultHelpTemplate = `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}} + +{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` + +// defaultHelpFunc is equivalent to executing defaultHelpTemplate. The two should be changed in sync. +func defaultHelpFunc(w io.Writer, in interface{}) error { + c := in.(*Command) + usage := c.Long + if usage == "" { + usage = c.Short + } + usage = trimRightSpace(usage) + if usage != "" { + fmt.Fprintln(w, usage) + fmt.Fprintln(w) + } + if c.Runnable() || c.HasSubCommands() { + fmt.Fprint(w, c.UsageString()) + } + return nil +} + +var defaultVersionTemplate = `{{with .DisplayName}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} +` + +// defaultVersionFunc is equivalent to executing defaultVersionTemplate. The two should be changed in sync. +func defaultVersionFunc(w io.Writer, in interface{}) error { + c := in.(*Command) + _, err := fmt.Fprintf(w, "%s version %s\n", c.DisplayName(), c.Version) + return err +} diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go index c0c08b05721..a1752f76317 100644 --- a/vendor/github.com/spf13/cobra/completions.go +++ b/vendor/github.com/spf13/cobra/completions.go @@ -35,7 +35,7 @@ const ( ) // Global map of flag completion functions.
Make sure to use flagCompletionMutex before you try to read and write from it. -var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){} +var flagCompletionFunctions = map[*pflag.Flag]CompletionFunc{} // lock for reading and writing from flagCompletionFunctions var flagCompletionMutex = &sync.RWMutex{} @@ -117,22 +117,50 @@ type CompletionOptions struct { HiddenDefaultCmd bool } +// Completion is a string that can be used for completions +// +// two formats are supported: +// - the completion choice +// - the completion choice with a textual description (separated by a TAB). +// +// [CompletionWithDesc] can be used to create a completion string with a textual description. +// +// Note: Go type alias is used to provide a more descriptive name in the documentation, but any string can be used. +type Completion = string + +// CompletionFunc is a function that provides completion results. +type CompletionFunc = func(cmd *Command, args []string, toComplete string) ([]Completion, ShellCompDirective) + +// CompletionWithDesc returns a [Completion] with a description by using the TAB delimited format. +func CompletionWithDesc(choice string, description string) Completion { + return choice + "\t" + description +} + // NoFileCompletions can be used to disable file completion for commands that should // not trigger file completions. -func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { +// +// This method satisfies [CompletionFunc]. +// It can be used with [Command.RegisterFlagCompletionFunc] and for [Command.ValidArgsFunction]. +func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]Completion, ShellCompDirective) { return nil, ShellCompDirectiveNoFileComp } // FixedCompletions can be used to create a completion function which always // returns the same results. -func FixedCompletions(choices []string, directive ShellCompDirective) func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { - return func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { +// +// This method returns a function that satisfies [CompletionFunc] +// It can be used with [Command.RegisterFlagCompletionFunc] and for [Command.ValidArgsFunction]. +func FixedCompletions(choices []Completion, directive ShellCompDirective) CompletionFunc { + return func(cmd *Command, args []string, toComplete string) ([]Completion, ShellCompDirective) { return choices, directive } } // RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag. -func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error { +// +// You can use pre-defined completion functions such as [FixedCompletions] or [NoFileCompletions], +// or you can define your own. +func (c *Command) RegisterFlagCompletionFunc(flagName string, f CompletionFunc) error { flag := c.Flag(flagName) if flag == nil { return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName) @@ -148,7 +176,7 @@ func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Comman } // GetFlagCompletionFunc returns the completion function for the given flag of the command, if available. 
-func (c *Command) GetFlagCompletionFunc(flagName string) (func(*Command, []string, string) ([]string, ShellCompDirective), bool) { +func (c *Command) GetFlagCompletionFunc(flagName string) (CompletionFunc, bool) { flag := c.Flag(flagName) if flag == nil { return nil, false @@ -270,7 +298,15 @@ func (c *Command) initCompleteCmd(args []string) { } } -func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDirective, error) { +// SliceValue is a reduced version of [pflag.SliceValue]. It is used to detect +// flags that accept multiple values and therefore can provide completion +// multiple times. +type SliceValue interface { + // GetSlice returns the flag value list as an array of strings. + GetSlice() []string +} + +func (c *Command) getCompletions(args []string) (*Command, []Completion, ShellCompDirective, error) { // The last argument, which is not completely typed by the user, // should not be part of the list of arguments toComplete := args[len(args)-1] @@ -298,7 +334,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi } if err != nil { // Unable to find the real command. E.g., someInvalidCmd - return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("unable to find a command for arguments: %v", trimmedArgs) + return c, []Completion{}, ShellCompDirectiveDefault, fmt.Errorf("unable to find a command for arguments: %v", trimmedArgs) } finalCmd.ctx = c.ctx @@ -328,7 +364,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi // Parse the flags early so we can check if required flags are set if err = finalCmd.ParseFlags(finalArgs); err != nil { - return finalCmd, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error()) + return finalCmd, []Completion{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error()) } realArgCount := finalCmd.Flags().NArg() @@ -340,14 +376,14 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi if flagErr != nil { // If error type is flagCompError and we don't want flagCompletion we should ignore the error if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) { - return finalCmd, []string{}, ShellCompDirectiveDefault, flagErr + return finalCmd, []Completion{}, ShellCompDirectiveDefault, flagErr } } // Look for the --help or --version flags. If they are present, // there should be no further completions. if helpOrVersionFlagPresent(finalCmd) { - return finalCmd, []string{}, ShellCompDirectiveNoFileComp, nil + return finalCmd, []Completion{}, ShellCompDirectiveNoFileComp, nil } // We only remove the flags from the arguments if DisableFlagParsing is not set. 
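The hunks above migrate cobra's completion machinery to the named Completion and CompletionFunc types and teach flag-name completion to keep suggesting flags backed by multi-value values (anything satisfying SliceValue, or whose Type contains "Slice"/"Array" or starts with "stringTo"). A minimal sketch of how a downstream program would use this API; the command name, flag name, and region values are hypothetical illustrations, not part of this patch:

package main

import "github.com/spf13/cobra"

func main() {
	// Hypothetical command and flag, purely for illustration.
	cmd := &cobra.Command{Use: "deploy", Run: func(*cobra.Command, []string) {}}
	cmd.Flags().StringSlice("region", nil, "target regions")

	// CompletionFunc is now a named type, so registrations read cleanly.
	var regionComp cobra.CompletionFunc = func(c *cobra.Command, args []string, toComplete string) ([]cobra.Completion, cobra.ShellCompDirective) {
		return []cobra.Completion{
			cobra.CompletionWithDesc("us-east-1", "US East"),
			cobra.CompletionWithDesc("eu-west-1", "EU West"),
		}, cobra.ShellCompDirectiveNoFileComp
	}
	_ = cmd.RegisterFlagCompletionFunc("region", regionComp)
	_ = cmd.Execute()
}

Because --region is a StringSlice flag, its pflag value provides GetSlice, so the SliceValue check introduced above keeps offering --region in completions even after it has been set once.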
@@ -376,11 +412,11 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi return finalCmd, subDir, ShellCompDirectiveFilterDirs, nil } // Directory completion - return finalCmd, []string{}, ShellCompDirectiveFilterDirs, nil + return finalCmd, []Completion{}, ShellCompDirectiveFilterDirs, nil } } - var completions []string + var completions []Completion var directive ShellCompDirective // Enforce flag groups before doing flag completions @@ -399,10 +435,14 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi // If we have not found any required flags, only then can we show regular flags if len(completions) == 0 { doCompleteFlags := func(flag *pflag.Flag) { - if !flag.Changed || + _, acceptsMultiple := flag.Value.(SliceValue) + acceptsMultiple = acceptsMultiple || strings.Contains(flag.Value.Type(), "Slice") || - strings.Contains(flag.Value.Type(), "Array") { - // If the flag is not already present, or if it can be specified multiple times (Array or Slice) + strings.Contains(flag.Value.Type(), "Array") || + strings.HasPrefix(flag.Value.Type(), "stringTo") + + if !flag.Changed || acceptsMultiple { + // If the flag is not already present, or if it can be specified multiple times (Array, Slice, or stringTo) // we suggest it as a completion completions = append(completions, getFlagNameCompletions(flag, toComplete)...) } @@ -462,7 +502,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi for _, subCmd := range finalCmd.Commands() { if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand { if strings.HasPrefix(subCmd.Name(), toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) + completions = append(completions, CompletionWithDesc(subCmd.Name(), subCmd.Short)) } directive = ShellCompDirectiveNoFileComp } @@ -507,7 +547,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi } // Find the completion function for the flag or command - var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) + var completionFn CompletionFunc if flag != nil && flagCompletion { flagCompletionMutex.RLock() completionFn = flagCompletionFunctions[flag] @@ -518,7 +558,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi if completionFn != nil { // Go custom completion defined for this flag or command. // Call the registered completion function to get the completions. - var comps []string + var comps []Completion comps, directive = completionFn(finalCmd, finalArgs, toComplete) completions = append(completions, comps...) 
} @@ -531,23 +571,23 @@ func helpOrVersionFlagPresent(cmd *Command) bool { len(versionFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && versionFlag.Changed { return true } - if helpFlag := cmd.Flags().Lookup("help"); helpFlag != nil && + if helpFlag := cmd.Flags().Lookup(helpFlagName); helpFlag != nil && len(helpFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && helpFlag.Changed { return true } return false } -func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string { +func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []Completion { if nonCompletableFlag(flag) { - return []string{} + return []Completion{} } - var completions []string + var completions []Completion flagName := "--" + flag.Name if strings.HasPrefix(flagName, toComplete) { // Flag without the = - completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + completions = append(completions, CompletionWithDesc(flagName, flag.Usage)) // Why suggest both long forms: --flag and --flag= ? // This forces the user to *always* have to type either an = or a space after the flag name. @@ -559,20 +599,20 @@ func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string { // if len(flag.NoOptDefVal) == 0 { // // Flag requires a value, so it can be suffixed with = // flagName += "=" - // completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + // completions = append(completions, CompletionWithDesc(flagName, flag.Usage)) // } } flagName = "-" + flag.Shorthand if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + completions = append(completions, CompletionWithDesc(flagName, flag.Usage)) } return completions } -func completeRequireFlags(finalCmd *Command, toComplete string) []string { - var completions []string +func completeRequireFlags(finalCmd *Command, toComplete string) []Completion { + var completions []Completion doCompleteRequiredFlags := func(flag *pflag.Flag) { if _, present := flag.Annotations[BashCompOneRequiredFlag]; present { @@ -687,8 +727,8 @@ func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*p // 1- the feature has been explicitly disabled by the program, // 2- c has no subcommands (to avoid creating one), // 3- c already has a 'completion' command provided by the program. -func (c *Command) InitDefaultCompletionCmd() { - if c.CompletionOptions.DisableDefaultCmd || !c.HasSubCommands() { +func (c *Command) InitDefaultCompletionCmd(args ...string) { + if c.CompletionOptions.DisableDefaultCmd { return } @@ -701,6 +741,16 @@ func (c *Command) InitDefaultCompletionCmd() { haveNoDescFlag := !c.CompletionOptions.DisableNoDescFlag && !c.CompletionOptions.DisableDescriptions + // Special case to know if there are sub-commands or not. + hasSubCommands := false + for _, cmd := range c.commands { + if cmd.Name() != ShellCompRequestCmd && cmd.Name() != helpCommandName { + // We found a real sub-command (not 'help' or '__complete') + hasSubCommands = true + break + } + } + completionCmd := &Command{ Use: compCmdName, Short: "Generate the autocompletion script for the specified shell", @@ -714,6 +764,22 @@ See each sub-command's help for details on how to use the generated script. } c.AddCommand(completionCmd) + if !hasSubCommands { + // If the 'completion' command will be the only sub-command, + // we only create it if it is actually being called. 
+		// This avoids breaking programs that would suddenly find themselves with
+		// a subcommand, which would prevent them from accepting arguments.
+		// We also create the 'completion' command if the user is triggering
+		// shell completion for it (prog __complete completion '')
+		subCmd, cmdArgs, err := c.Find(args)
+		if err != nil || subCmd.Name() != compCmdName &&
+			!(subCmd.Name() == ShellCompRequestCmd && len(cmdArgs) > 1 && cmdArgs[0] == compCmdName) {
+			// The completion command is not being called or being completed, so we remove it.
+			c.RemoveCommand(completionCmd)
+			return
+		}
+	}
+
 	out := c.OutOrStdout()
 	noDesc := c.CompletionOptions.DisableDescriptions
 	shortDesc := "Generate the autocompletion script for %s"
diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go
index a830b7bcad2..746dcb92e3e 100644
--- a/vendor/github.com/spf13/cobra/powershell_completions.go
+++ b/vendor/github.com/spf13/cobra/powershell_completions.go
@@ -162,7 +162,10 @@ filter __%[1]s_escapeStringWithSpecialChars {
         if (-Not $Description) {
             $Description = " "
         }
-        @{Name="$Name";Description="$Description"}
+        New-Object -TypeName PSCustomObject -Property @{
+            Name = "$Name"
+            Description = "$Description"
+        }
     }
 
@@ -240,7 +243,12 @@ filter __%[1]s_escapeStringWithSpecialChars {
             __%[1]s_debug "Only one completion left"
 
             # insert space after value
-            [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
+            $CompletionText = $($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space
+            if ($ExecutionContext.SessionState.LanguageMode -eq "FullLanguage"){
+                [System.Management.Automation.CompletionResult]::new($CompletionText, "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
+            } else {
+                $CompletionText
+            }
 
         } else {
             # Add the proper number of spaces to align the descriptions
@@ -255,7 +263,12 @@ filter __%[1]s_escapeStringWithSpecialChars {
                 $Description = " ($($comp.Description))"
             }
 
-            [System.Management.Automation.CompletionResult]::new("$($comp.Name)$Description", "$($comp.Name)$Description", 'ParameterValue', "$($comp.Description)")
+            $CompletionText = "$($comp.Name)$Description"
+            if ($ExecutionContext.SessionState.LanguageMode -eq "FullLanguage"){
+                [System.Management.Automation.CompletionResult]::new($CompletionText, "$($comp.Name)$Description", 'ParameterValue', "$($comp.Description)")
+            } else {
+                $CompletionText
+            }
         }
     }
 
@@ -264,7 +277,13 @@ filter __%[1]s_escapeStringWithSpecialChars {
         # insert space after value
         # MenuComplete will automatically show the ToolTip of
         # the highlighted value at the bottom of the suggestions.
-        [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
+
+        $CompletionText = $($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space
+        if ($ExecutionContext.SessionState.LanguageMode -eq "FullLanguage"){
+            [System.Management.Automation.CompletionResult]::new($CompletionText, "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
+        } else {
+            $CompletionText
+        }
     }
 
     # TabCompleteNext and in case we get something unknown
@@ -272,7 +291,13 @@ filter __%[1]s_escapeStringWithSpecialChars {
         # Like MenuComplete but we don't want to add a space here because
         # the user needs to press space anyway to get the completion.
# Description will not be shown because that's not possible with TabCompleteNext - [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars), "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + + $CompletionText = $($comp.Name | __%[1]s_escapeStringWithSpecialChars) + if ($ExecutionContext.SessionState.LanguageMode -eq "FullLanguage"){ + [System.Management.Automation.CompletionResult]::new($CompletionText, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + } else { + $CompletionText + } } } diff --git a/vendor/github.com/spf13/pflag/.editorconfig b/vendor/github.com/spf13/pflag/.editorconfig new file mode 100644 index 00000000000..4492e9f9fe1 --- /dev/null +++ b/vendor/github.com/spf13/pflag/.editorconfig @@ -0,0 +1,12 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_size = 4 +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +[*.go] +indent_style = tab diff --git a/vendor/github.com/spf13/pflag/.golangci.yaml b/vendor/github.com/spf13/pflag/.golangci.yaml new file mode 100644 index 00000000000..b274f248451 --- /dev/null +++ b/vendor/github.com/spf13/pflag/.golangci.yaml @@ -0,0 +1,4 @@ +linters: + disable-all: true + enable: + - nolintlint diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index 24a5036e95b..7c058de3744 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -160,7 +160,7 @@ type FlagSet struct { args []string // arguments after flags argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no -- errorHandling ErrorHandling - output io.Writer // nil means stderr; use out() accessor + output io.Writer // nil means stderr; use Output() accessor interspersed bool // allow interspersed option/non-option args normalizeNameFunc func(f *FlagSet, name string) NormalizedName @@ -255,13 +255,20 @@ func (f *FlagSet) normalizeFlagName(name string) NormalizedName { return n(f, name) } -func (f *FlagSet) out() io.Writer { +// Output returns the destination for usage and error messages. os.Stderr is returned if +// output was not set or was set to nil. +func (f *FlagSet) Output() io.Writer { if f.output == nil { return os.Stderr } return f.output } +// Name returns the name of the flag set. +func (f *FlagSet) Name() string { + return f.name +} + // SetOutput sets the destination for usage and error messages. // If output is nil, os.Stderr is used. func (f *FlagSet) SetOutput(output io.Writer) { @@ -358,7 +365,7 @@ func (f *FlagSet) ShorthandLookup(name string) *Flag { } if len(name) > 1 { msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name) - fmt.Fprintf(f.out(), msg) + fmt.Fprintf(f.Output(), msg) panic(msg) } c := name[0] @@ -482,7 +489,7 @@ func (f *FlagSet) Set(name, value string) error { } if flag.Deprecated != "" { - fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) + fmt.Fprintf(f.Output(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) } return nil } @@ -523,7 +530,7 @@ func Set(name, value string) error { // otherwise, the default values of all defined flags in the set. 
func (f *FlagSet) PrintDefaults() { usages := f.FlagUsages() - fmt.Fprint(f.out(), usages) + fmt.Fprint(f.Output(), usages) } // defaultIsZeroValue returns true if the default value for this flag represents @@ -758,7 +765,7 @@ func PrintDefaults() { // defaultUsage is the default function to print a usage message. func defaultUsage(f *FlagSet) { - fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) + fmt.Fprintf(f.Output(), "Usage of %s:\n", f.name) f.PrintDefaults() } @@ -844,7 +851,7 @@ func (f *FlagSet) AddFlag(flag *Flag) { _, alreadyThere := f.formal[normalizedFlagName] if alreadyThere { msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name) - fmt.Fprintln(f.out(), msg) + fmt.Fprintln(f.Output(), msg) panic(msg) // Happens only if flags are declared with identical names } if f.formal == nil { @@ -860,7 +867,7 @@ func (f *FlagSet) AddFlag(flag *Flag) { } if len(flag.Shorthand) > 1 { msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand) - fmt.Fprintf(f.out(), msg) + fmt.Fprintf(f.Output(), msg) panic(msg) } if f.shorthands == nil { @@ -870,7 +877,7 @@ func (f *FlagSet) AddFlag(flag *Flag) { used, alreadyThere := f.shorthands[c] if alreadyThere { msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name) - fmt.Fprintf(f.out(), msg) + fmt.Fprintf(f.Output(), msg) panic(msg) } f.shorthands[c] = flag @@ -909,7 +916,7 @@ func VarP(value Value, name, shorthand, usage string) { func (f *FlagSet) failf(format string, a ...interface{}) error { err := fmt.Errorf(format, a...) if f.errorHandling != ContinueOnError { - fmt.Fprintln(f.out(), err) + fmt.Fprintln(f.Output(), err) f.usage() } return err @@ -1060,7 +1067,7 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse } if flag.ShorthandDeprecated != "" { - fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated) + fmt.Fprintf(f.Output(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated) } err = fn(flag, value) diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go index 3d414ba69fe..06b8bcb5721 100644 --- a/vendor/github.com/spf13/pflag/ip.go +++ b/vendor/github.com/spf13/pflag/ip.go @@ -16,6 +16,9 @@ func newIPValue(val net.IP, p *net.IP) *ipValue { func (i *ipValue) String() string { return net.IP(*i).String() } func (i *ipValue) Set(s string) error { + if s == "" { + return nil + } ip := net.ParseIP(strings.TrimSpace(s)) if ip == nil { return fmt.Errorf("failed to parse IP: %q", s) diff --git a/vendor/github.com/spf13/pflag/ipnet_slice.go b/vendor/github.com/spf13/pflag/ipnet_slice.go new file mode 100644 index 00000000000..6b541aa8798 --- /dev/null +++ b/vendor/github.com/spf13/pflag/ipnet_slice.go @@ -0,0 +1,147 @@ +package pflag + +import ( + "fmt" + "io" + "net" + "strings" +) + +// -- ipNetSlice Value +type ipNetSliceValue struct { + value *[]net.IPNet + changed bool +} + +func newIPNetSliceValue(val []net.IPNet, p *[]net.IPNet) *ipNetSliceValue { + ipnsv := new(ipNetSliceValue) + ipnsv.value = p + *ipnsv.value = val + return ipnsv +} + +// Set converts, and assigns, the comma-separated IPNet argument string representation as the []net.IPNet value of this flag. +// If Set is called on a flag that already has a []net.IPNet assigned, the newly converted values will be appended. 
+func (s *ipNetSliceValue) Set(val string) error {
+
+	// remove all quote characters
+	rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+	// read flag arguments with CSV parser
+	ipNetStrSlice, err := readAsCSV(rmQuote.Replace(val))
+	if err != nil && err != io.EOF {
+		return err
+	}
+
+	// parse IP values into slice
+	out := make([]net.IPNet, 0, len(ipNetStrSlice))
+	for _, ipNetStr := range ipNetStrSlice {
+		_, n, err := net.ParseCIDR(strings.TrimSpace(ipNetStr))
+		if err != nil {
+			return fmt.Errorf("invalid string being converted to CIDR: %s", ipNetStr)
+		}
+		out = append(out, *n)
+	}
+
+	if !s.changed {
+		*s.value = out
+	} else {
+		*s.value = append(*s.value, out...)
+	}
+
+	s.changed = true
+
+	return nil
+}
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *ipNetSliceValue) Type() string {
+	return "ipNetSlice"
+}
+
+// String defines a "native" format for this net.IPNet slice flag value.
+func (s *ipNetSliceValue) String() string {
+
+	ipNetStrSlice := make([]string, len(*s.value))
+	for i, n := range *s.value {
+		ipNetStrSlice[i] = n.String()
+	}
+
+	out, _ := writeAsCSV(ipNetStrSlice)
+	return "[" + out + "]"
+}
+
+func ipNetSliceConv(val string) (interface{}, error) {
+	val = strings.Trim(val, "[]")
+	// Empty string would cause a slice with one (empty) entry
+	if len(val) == 0 {
+		return []net.IPNet{}, nil
+	}
+	ss := strings.Split(val, ",")
+	out := make([]net.IPNet, len(ss))
+	for i, sval := range ss {
+		_, n, err := net.ParseCIDR(strings.TrimSpace(sval))
+		if err != nil {
+			return nil, fmt.Errorf("invalid string being converted to CIDR: %s", sval)
+		}
+		out[i] = *n
+	}
+	return out, nil
+}
+
+// GetIPNetSlice returns the []net.IPNet value of a flag with the given name.
+func (f *FlagSet) GetIPNetSlice(name string) ([]net.IPNet, error) {
+	val, err := f.getFlagType(name, "ipNetSlice", ipNetSliceConv)
+	if err != nil {
+		return []net.IPNet{}, err
+	}
+	return val.([]net.IPNet), nil
+}
+
+// IPNetSliceVar defines an ipNetSlice flag with specified name, default value, and usage string.
+// The argument p points to a []net.IPNet variable in which to store the value of the flag.
+func (f *FlagSet) IPNetSliceVar(p *[]net.IPNet, name string, value []net.IPNet, usage string) {
+	f.VarP(newIPNetSliceValue(value, p), name, "", usage)
+}
+
+// IPNetSliceVarP is like IPNetSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetSliceVarP(p *[]net.IPNet, name, shorthand string, value []net.IPNet, usage string) {
+	f.VarP(newIPNetSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPNetSliceVar defines a []net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to a []net.IPNet variable in which to store the value of the flag.
+func IPNetSliceVar(p *[]net.IPNet, name string, value []net.IPNet, usage string) {
+	CommandLine.VarP(newIPNetSliceValue(value, p), name, "", usage)
+}
+
+// IPNetSliceVarP is like IPNetSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IPNetSliceVarP(p *[]net.IPNet, name, shorthand string, value []net.IPNet, usage string) {
+	CommandLine.VarP(newIPNetSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPNetSlice defines a []net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IPNet variable that stores the value of that flag.
+func (f *FlagSet) IPNetSlice(name string, value []net.IPNet, usage string) *[]net.IPNet {
+	p := []net.IPNet{}
+	f.IPNetSliceVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// IPNetSliceP is like IPNetSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetSliceP(name, shorthand string, value []net.IPNet, usage string) *[]net.IPNet {
+	p := []net.IPNet{}
+	f.IPNetSliceVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// IPNetSlice defines a []net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IPNet variable that stores the value of the flag.
+func IPNetSlice(name string, value []net.IPNet, usage string) *[]net.IPNet {
+	return CommandLine.IPNetSliceP(name, "", value, usage)
+}
+
+// IPNetSliceP is like IPNetSlice, but accepts a shorthand letter that can be used after a single dash.
+func IPNetSliceP(name, shorthand string, value []net.IPNet, usage string) *[]net.IPNet {
+	return CommandLine.IPNetSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go
index 4894af81802..d1ff0a96ba0 100644
--- a/vendor/github.com/spf13/pflag/string_array.go
+++ b/vendor/github.com/spf13/pflag/string_array.go
@@ -31,11 +31,7 @@ func (s *stringArrayValue) Append(val string) error {
 func (s *stringArrayValue) Replace(val []string) error {
 	out := make([]string, len(val))
 	for i, d := range val {
-		var err error
 		out[i] = d
-		if err != nil {
-			return err
-		}
 	}
 	*s.value = out
 	return nil
diff --git a/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md
new file mode 100644
index 00000000000..773c9b6431f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md
@@ -0,0 +1,27 @@
+# Contributing to go.opentelemetry.io/auto/sdk
+
+The `go.opentelemetry.io/auto/sdk` module is a purpose-built OpenTelemetry SDK.
+It is designed to be:
+
+0. An OpenTelemetry compliant SDK
+1. Instrumented by auto-instrumentation (serializable into OTLP JSON)
+2. Lightweight
+3. User-friendly
+
+These design choices are listed in the order of their importance.
+
+The primary design goal of this module is to be an OpenTelemetry SDK.
+This means that it needs to implement the Go APIs found in `go.opentelemetry.io/otel`.
+
+Having met the requirement of SDK compliance, this module needs to provide code that the `go.opentelemetry.io/auto` module can instrument.
+The chosen approach to meet this goal is to ensure the telemetry from the SDK is serializable into JSON-encoded OTLP.
+This then ensures that the serialized form is compatible with other OpenTelemetry systems, and the auto-instrumentation can use these systems to deserialize any telemetry it is sent.
+
+Outside of these first two goals, the intended use becomes relevant.
+This package is intended to be used in the `go.opentelemetry.io/otel` global API as a default when the auto-instrumentation is running.
+Because of this, this package must not add unnecessary dependencies to that API.
+Ideally, it adds none.
+It also needs to operate efficiently.
+
+Finally, this module is designed to be user-friendly to Go development.
+It hides complexity in order to provide simpler APIs when the previous goals can all still be met.
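Returning to the pflag side of this bump: the IPNetSlice family added above parses comma-separated CIDR lists into []net.IPNet. A minimal usage sketch; the flag set name, flag name, and sample CIDRs are hypothetical, not part of this patch:

package main

import (
	"fmt"
	"net"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)

	// Registers a hypothetical --allow-cidrs flag with an empty default.
	cidrs := fs.IPNetSlice("allow-cidrs", []net.IPNet{}, "CIDRs allowed to connect")

	if err := fs.Parse([]string{"--allow-cidrs", "10.0.0.0/8,192.168.0.0/16"}); err != nil {
		fmt.Println(err)
		return
	}
	for _, n := range *cidrs {
		fmt.Println(n.String()) // "10.0.0.0/8", then "192.168.0.0/16"
	}
}

Because ipNetSliceValue.Set tracks whether the flag was already changed, the flag may be given once with a comma-separated list or repeated, with later uses appending to the slice.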
diff --git a/vendor/github.com/OneOfOne/xxhash/LICENSE b/vendor/go.opentelemetry.io/auto/sdk/LICENSE similarity index 94% rename from vendor/github.com/OneOfOne/xxhash/LICENSE rename to vendor/go.opentelemetry.io/auto/sdk/LICENSE index 9e30b4f342e..261eeb9e9f8 100644 --- a/vendor/github.com/OneOfOne/xxhash/LICENSE +++ b/vendor/go.opentelemetry.io/auto/sdk/LICENSE @@ -178,10 +178,24 @@ APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" + boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md new file mode 100644 index 00000000000..088d19a6ce7 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md @@ -0,0 +1,15 @@ +# Versioning + +This document describes the versioning policy for this module. +This policy is designed so the following goals can be achieved. + +**Users are provided a codebase of value that is stable and secure.** + +## Policy + +* Versioning of this module will be idiomatic of a Go project using [Go modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) will be used. + * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). + * Any `v2` or higher version of this module will be included as a `/vN` at the end of the module path used in `go.mod` files and in the package import path. + +* GitHub releases will be made for all releases. diff --git a/vendor/go.opentelemetry.io/auto/sdk/doc.go b/vendor/go.opentelemetry.io/auto/sdk/doc.go new file mode 100644 index 00000000000..ad73d8cb9d2 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/doc.go @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package sdk provides an auto-instrumentable OpenTelemetry SDK. + +An [go.opentelemetry.io/auto.Instrumentation] can be configured to target the +process running this SDK. In that case, all telemetry the SDK produces will be +processed and handled by that [go.opentelemetry.io/auto.Instrumentation]. + +By default, if there is no [go.opentelemetry.io/auto.Instrumentation] set to +auto-instrument the SDK, the SDK will not generate any telemetry. 
+*/
+package sdk
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go
new file mode 100644
index 00000000000..af6ef171f6a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+// Attr is a key-value pair.
+type Attr struct {
+	Key   string `json:"key,omitempty"`
+	Value Value  `json:"value,omitempty"`
+}
+
+// String returns an Attr for a string value.
+func String(key, value string) Attr {
+	return Attr{key, StringValue(value)}
+}
+
+// Int64 returns an Attr for an int64 value.
+func Int64(key string, value int64) Attr {
+	return Attr{key, Int64Value(value)}
+}
+
+// Int returns an Attr for an int value.
+func Int(key string, value int) Attr {
+	return Int64(key, int64(value))
+}
+
+// Float64 returns an Attr for a float64 value.
+func Float64(key string, value float64) Attr {
+	return Attr{key, Float64Value(value)}
+}
+
+// Bool returns an Attr for a bool value.
+func Bool(key string, value bool) Attr {
+	return Attr{key, BoolValue(value)}
+}
+
+// Bytes returns an Attr for a []byte value.
+// The passed slice must not be changed after it is passed.
+func Bytes(key string, value []byte) Attr {
+	return Attr{key, BytesValue(value)}
+}
+
+// Slice returns an Attr for a []Value value.
+// The passed slice must not be changed after it is passed.
+func Slice(key string, value ...Value) Attr {
+	return Attr{key, SliceValue(value...)}
+}
+
+// Map returns an Attr for a map value.
+// The passed slice must not be changed after it is passed.
+func Map(key string, value ...Attr) Attr {
+	return Attr{key, MapValue(value...)}
+}
+
+// Equal reports whether a is equal to b.
+func (a Attr) Equal(b Attr) bool {
+	return a.Key == b.Key && a.Value.Equal(b.Value)
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go
new file mode 100644
index 00000000000..949e2165c05
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go
@@ -0,0 +1,8 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package telemetry provides a lightweight representation of OpenTelemetry
+telemetry that is compatible with the OTLP JSON protobuf encoding.
+*/
+package telemetry
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go
new file mode 100644
index 00000000000..e854d7e84e8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go
@@ -0,0 +1,103 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+	"encoding/hex"
+	"errors"
+	"fmt"
+)
+
+const (
+	traceIDSize = 16
+	spanIDSize  = 8
+)
+
+// TraceID is a custom data type that is used for all trace IDs.
+type TraceID [traceIDSize]byte
+
+// String returns the hex string representation of a TraceID.
+func (tid TraceID) String() string {
+	return hex.EncodeToString(tid[:])
+}
+
+// IsEmpty returns true if the trace ID contains only zero bytes.
+func (tid TraceID) IsEmpty() bool {
+	return tid == [traceIDSize]byte{}
+}
+
+// MarshalJSON converts the trace ID into a hex string enclosed in quotes.
+func (tid TraceID) MarshalJSON() ([]byte, error) {
+	if tid.IsEmpty() {
+		return []byte(`""`), nil
+	}
+	return marshalJSON(tid[:])
+}
+
+// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in
+// quotes.
+func (tid *TraceID) UnmarshalJSON(data []byte) error {
+	*tid = [traceIDSize]byte{}
+	return unmarshalJSON(tid[:], data)
+}
+
+// SpanID is a custom data type that is used for all span IDs.
+type SpanID [spanIDSize]byte
+
+// String returns the hex string representation of a SpanID.
+func (sid SpanID) String() string {
+	return hex.EncodeToString(sid[:])
+}
+
+// IsEmpty returns true if the span ID contains only zero bytes.
+func (sid SpanID) IsEmpty() bool {
+	return sid == [spanIDSize]byte{}
+}
+
+// MarshalJSON converts span ID into a hex string enclosed in quotes.
+func (sid SpanID) MarshalJSON() ([]byte, error) {
+	if sid.IsEmpty() {
+		return []byte(`""`), nil
+	}
+	return marshalJSON(sid[:])
+}
+
+// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes.
+func (sid *SpanID) UnmarshalJSON(data []byte) error {
+	*sid = [spanIDSize]byte{}
+	return unmarshalJSON(sid[:], data)
+}
+
+// marshalJSON converts id into a hex string enclosed in quotes.
+func marshalJSON(id []byte) ([]byte, error) {
+	// Plus 2 quote chars at the start and end.
+	hexLen := hex.EncodedLen(len(id)) + 2
+
+	b := make([]byte, hexLen)
+	hex.Encode(b[1:hexLen-1], id)
+	b[0], b[hexLen-1] = '"', '"'
+
+	return b, nil
+}
+
+// unmarshalJSON inflates an ID from a hex string, possibly enclosed in quotes.
+func unmarshalJSON(dst []byte, src []byte) error {
+	if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' {
+		src = src[1 : l-1]
+	}
+	nLen := len(src)
+	if nLen == 0 {
+		return nil
+	}
+
+	if len(dst) != hex.DecodedLen(nLen) {
+		return errors.New("invalid length for ID")
+	}
+
+	_, err := hex.Decode(dst, src)
+	if err != nil {
+		return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err)
+	}
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go
new file mode 100644
index 00000000000..29e629d6674
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+	"encoding/json"
+	"strconv"
+)
+
+// protoInt64 represents the protobuf encoding of integers which can be either
+// strings or integers.
+type protoInt64 int64
+
+// Int64 returns the protoInt64 as an int64.
+func (i *protoInt64) Int64() int64 { return int64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoInt64) UnmarshalJSON(data []byte) error {
+	if data[0] == '"' {
+		var str string
+		if err := json.Unmarshal(data, &str); err != nil {
+			return err
+		}
+		parsedInt, err := strconv.ParseInt(str, 10, 64)
+		if err != nil {
+			return err
+		}
+		*i = protoInt64(parsedInt)
+	} else {
+		var parsedInt int64
+		if err := json.Unmarshal(data, &parsedInt); err != nil {
+			return err
+		}
+		*i = protoInt64(parsedInt)
+	}
+	return nil
+}
+
+// protoUint64 represents the protobuf encoding of integers which can be either
+// strings or integers.
+type protoUint64 uint64
+
+// Uint64 returns the protoUint64 as a uint64.
+func (i *protoUint64) Uint64() uint64 { return uint64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoUint64) UnmarshalJSON(data []byte) error { + if data[0] == '"' { + var str string + if err := json.Unmarshal(data, &str); err != nil { + return err + } + parsedUint, err := strconv.ParseUint(str, 10, 64) + if err != nil { + return err + } + *i = protoUint64(parsedUint) + } else { + var parsedUint uint64 + if err := json.Unmarshal(data, &parsedUint); err != nil { + return err + } + *i = protoUint64(parsedUint) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go new file mode 100644 index 00000000000..cecad8bae3c --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Resource information. +type Resource struct { + // Attrs are the set of attributes that describe the resource. Attribute + // keys MUST be unique (it is not allowed to have more than one attribute + // with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // DroppedAttrs is the number of dropped attributes. If the value + // is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r. +func (r *Resource) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Resource type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Resource field: %#v", keyIface) + } + + switch key { + case "attributes": + err = decoder.Decode(&r.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&r.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go new file mode 100644 index 00000000000..b6f2e28d408 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Scope is the identifying values of the instrumentation scope. +type Scope struct { + Name string `json:"name,omitempty"` + Version string `json:"version,omitempty"` + Attrs []Attr `json:"attributes,omitempty"` + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r. +func (s *Scope) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Scope type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Scope field: %#v", keyIface) + } + + switch key { + case "name": + err = decoder.Decode(&s.Name) + case "version": + err = decoder.Decode(&s.Version) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go new file mode 100644 index 00000000000..a13a6b733da --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go @@ -0,0 +1,456 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "time" +) + +// A Span represents a single operation performed by a single component of the +// system. +type Span struct { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + SpanID SpanID `json:"spanId,omitempty"` + // trace_state conveys information about request position in multiple distributed tracing graphs. + // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header + // See also https://github.com/w3c/distributed-tracing for more details about this field. + TraceState string `json:"traceState,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + ParentSpanID SpanID `json:"parentSpanId,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether a span's parent + // is remote. The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // When creating span messages, if the message is logically forwarded from another source + // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD + // be copied as-is. If creating from a source that does not have an equivalent flags field + // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST + // be set to zero. + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // + // [Optional]. 
+ Flags uint32 `json:"flags,omitempty"` + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // Empty value is equivalent to an unknown span name. + // + // This field is required. + Name string `json:"name"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + Kind SpanKind `json:"kind,omitempty"` + // start_time_unix_nano is the start time of the span. On the client side, this is the time + // kept by the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + StartTime time.Time `json:"startTimeUnixNano,omitempty"` + // end_time_unix_nano is the end time of the span. On the client side, this is the time + // kept by the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + EndTime time.Time `json:"endTimeUnixNano,omitempty"` + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "example.com/myattribute": true + // "example.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // events is a collection of Event items. + Events []*SpanEvent `json:"events,omitempty"` + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. + DroppedEvents uint32 `json:"droppedEventsCount,omitempty"` + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + Links []*SpanLink `json:"links,omitempty"` + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. If this value is 0, then no links were dropped. 
+ DroppedLinks uint32 `json:"droppedLinksCount,omitempty"` + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + Status *Status `json:"status,omitempty"` +} + +// MarshalJSON encodes s into OTLP formatted JSON. +func (s Span) MarshalJSON() ([]byte, error) { + startT := s.StartTime.UnixNano() + if s.StartTime.IsZero() || startT < 0 { + startT = 0 + } + + endT := s.EndTime.UnixNano() + if s.EndTime.IsZero() || endT < 0 { + endT = 0 + } + + // Override non-empty default SpanID marshal and omitempty. + var parentSpanId string + if !s.ParentSpanID.IsEmpty() { + b := make([]byte, hex.EncodedLen(spanIDSize)) + hex.Encode(b, s.ParentSpanID[:]) + parentSpanId = string(b) + } + + type Alias Span + return json.Marshal(struct { + Alias + ParentSpanID string `json:"parentSpanId,omitempty"` + StartTime uint64 `json:"startTimeUnixNano,omitempty"` + EndTime uint64 `json:"endTimeUnixNano,omitempty"` + }{ + Alias: Alias(s), + ParentSpanID: parentSpanId, + StartTime: uint64(startT), + EndTime: uint64(endT), + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s. +func (s *Span) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Span type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Span field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&s.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&s.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&s.TraceState) + case "parentSpanId", "parent_span_id": + err = decoder.Decode(&s.ParentSpanID) + case "flags": + err = decoder.Decode(&s.Flags) + case "name": + err = decoder.Decode(&s.Name) + case "kind": + err = decoder.Decode(&s.Kind) + case "startTimeUnixNano", "start_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + s.StartTime = time.Unix(0, int64(val.Uint64())) + case "endTimeUnixNano", "end_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + s.EndTime = time.Unix(0, int64(val.Uint64())) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + case "events": + err = decoder.Decode(&s.Events) + case "droppedEventsCount", "dropped_events_count": + err = decoder.Decode(&s.DroppedEvents) + case "links": + err = decoder.Decode(&s.Links) + case "droppedLinksCount", "dropped_links_count": + err = decoder.Decode(&s.DroppedLinks) + case "status": + err = decoder.Decode(&s.Status) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// SpanFlags represents constants used to interpret the +// Span.flags field, which is protobuf 'fixed32' type and is to +// be used as bit-fields. Each non-zero value defined in this enum is +// a bit-mask. To extract the bit-field, for example, use an +// expression like: +// +// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) +// +// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. +// +// Note that Span flags were introduced in version 1.1 of the +// OpenTelemetry protocol. 
Older Span producers do not set this
+// field, consequently consumers should not rely on the absence of a
+// particular flag bit to indicate the presence of a particular feature.
+type SpanFlags int32
+
+const (
+	// Bits 0-7 are used for trace flags.
+	SpanFlagsTraceFlagsMask SpanFlags = 255
+	// Bits 8 and 9 are used to indicate that the parent span or link span is remote.
+	// Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
+	// Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
+	SpanFlagsContextHasIsRemoteMask SpanFlags = 256
+	// SpanFlagsContextIsRemoteMask indicates the Span is remote.
+	SpanFlagsContextIsRemoteMask SpanFlags = 512
+)
+
+// SpanKind is the type of span. Can be used to specify additional relationships between spans
+// in addition to a parent/child relationship.
+type SpanKind int32
+
+const (
+	// Indicates that the span represents an internal operation within an application,
+	// as opposed to an operation happening at the boundaries. Default value.
+	SpanKindInternal SpanKind = 1
+	// Indicates that the span covers server-side handling of an RPC or other
+	// remote network request.
+	SpanKindServer SpanKind = 2
+	// Indicates that the span describes a request to some remote service.
+	SpanKindClient SpanKind = 3
+	// Indicates that the span describes a producer sending a message to a broker.
+	// Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
+	// between producer and consumer spans. A PRODUCER span ends when the message was accepted
+	// by the broker while the logical processing of the message might span a much longer time.
+	SpanKindProducer SpanKind = 4
+	// Indicates that the span describes a consumer receiving a message from a broker.
+	// Like the PRODUCER kind, there is often no direct critical path latency relationship
+	// between producer and consumer spans.
+	SpanKindConsumer SpanKind = 5
+)
+
+// SpanEvent is a time-stamped annotation of the span, consisting of a
+// user-supplied text description and key-value pairs.
+type SpanEvent struct {
+	// time_unix_nano is the time the event occurred.
+	Time time.Time `json:"timeUnixNano,omitempty"`
+	// name of the event.
+	// This field is semantically required to be set to non-empty string.
+	Name string `json:"name,omitempty"`
+	// attributes is a collection of attribute key/value pairs on the event.
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attrs []Attr `json:"attributes,omitempty"`
+	// dropped_attributes_count is the number of dropped attributes. If the value is 0,
+	// then no attributes were dropped.
+	DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// MarshalJSON encodes e into OTLP formatted JSON.
+func (e SpanEvent) MarshalJSON() ([]byte, error) {
+	t := e.Time.UnixNano()
+	if e.Time.IsZero() || t < 0 {
+		t = 0
+	}
+
+	type Alias SpanEvent
+	return json.Marshal(struct {
+		Alias
+		Time uint64 `json:"timeUnixNano,omitempty"`
+	}{
+		Alias: Alias(e),
+		Time:  uint64(t),
+	})
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se.
+func (se *SpanEvent) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid SpanEvent type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanEvent field: %#v", keyIface) + } + + switch key { + case "timeUnixNano", "time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + se.Time = time.Unix(0, int64(val.Uint64())) + case "name": + err = decoder.Decode(&se.Name) + case "attributes": + err = decoder.Decode(&se.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&se.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type SpanLink struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanID SpanID `json:"spanId,omitempty"` + // The trace_state associated with the link. + TraceState string `json:"traceState,omitempty"` + // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether the link is remote. + // The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl. +func (sl *SpanLink) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanLink type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanLink field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&sl.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&sl.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&sl.TraceState) + case "attributes": + err = decoder.Decode(&sl.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&sl.DroppedAttrs) + case "flags": + err = decoder.Decode(&sl.Flags) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go new file mode 100644 index 00000000000..1217776ead1 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +// For the semantics of status codes see +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status +type StatusCode int32 + +const ( + // The default status. + StatusCodeUnset StatusCode = 0 + // The Span has been validated by an Application developer or Operator to + // have completed successfully. + StatusCodeOK StatusCode = 1 + // The Span contains an error. + StatusCodeError StatusCode = 2 +) + +var statusCodeStrings = []string{ + "Unset", + "OK", + "Error", +} + +func (s StatusCode) String() string { + if s >= 0 && int(s) < len(statusCodeStrings) { + return statusCodeStrings[s] + } + return "" +} + +// The Status type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +type Status struct { + // A developer-facing human readable error message. + Message string `json:"message,omitempty"` + // The status code. + Code StatusCode `json:"code,omitempty"` +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go new file mode 100644 index 00000000000..69a348f0f06 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go @@ -0,0 +1,189 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Traces represents the traces data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP traces data but do +// not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type Traces struct { + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td. 
+func (td *Traces) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid TracesData type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid TracesData field: %#v", keyIface) + } + + switch key { + case "resourceSpans", "resource_spans": + err = decoder.Decode(&td.ResourceSpans) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of ScopeSpans from a Resource. +type ResourceSpans struct { + // The resource for the spans in this message. + // If this field is not set then no resource info is known. + Resource Resource `json:"resource"` + // A list of ScopeSpans that originate from a resource. + ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"` + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_spans" field which have their own schema_url field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs. +func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ResourceSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface) + } + + switch key { + case "resource": + err = decoder.Decode(&rs.Resource) + case "scopeSpans", "scope_spans": + err = decoder.Decode(&rs.ScopeSpans) + case "schemaUrl", "schema_url": + err = decoder.Decode(&rs.SchemaURL) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of Spans produced by an InstrumentationScope. +type ScopeSpans struct { + // The instrumentation scope information for the spans in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope *Scope `json:"scope"` + // A list of Spans that originate from an instrumentation scope. + Spans []*Span `json:"spans,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the span data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all spans and span events in the "spans" field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss. +func (ss *ScopeSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ScopeSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface)
+		}
+
+		switch key {
+		case "scope":
+			err = decoder.Decode(&ss.Scope)
+		case "spans":
+			err = decoder.Decode(&ss.Spans)
+		case "schemaUrl", "schema_url":
+			err = decoder.Decode(&ss.SchemaURL)
+		default:
+			// Skip unknown.
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go
new file mode 100644
index 00000000000..0dd01b063a3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go
@@ -0,0 +1,452 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:generate stringer -type=ValueKind -trimprefix=ValueKind
+
+package telemetry
+
+import (
+	"bytes"
+	"cmp"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"slices"
+	"strconv"
+	"unsafe"
+)
+
+// A Value represents a structured value.
+// A zero value is valid and represents an empty value.
+type Value struct {
+	// Ensure forward compatibility by explicitly making this not comparable.
+	noCmp [0]func() //nolint: unused // This is indeed used.
+
+	// num holds the value for Int64, Float64, and Bool. It holds the length
+	// for String, Bytes, Slice, Map.
+	num uint64
+	// any holds either the KindBool, KindInt64, KindFloat64, stringptr,
+	// bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64
+	// then the value of Value is in num as described above. Otherwise, it
+	// contains the value wrapped in the appropriate type.
+	any any
+}
+
+type (
+	// stringptr represents a value in Value.any for KindString Values.
+	stringptr *byte
+	// bytesptr represents a value in Value.any for KindBytes Values.
+	bytesptr *byte
+	// sliceptr represents a value in Value.any for KindSlice Values.
+	sliceptr *Value
+	// mapptr represents a value in Value.any for KindMap Values.
+	mapptr *Attr
+)
+
+// ValueKind is the kind of a [Value].
+type ValueKind int
+
+// ValueKind values.
+const (
+	ValueKindEmpty ValueKind = iota
+	ValueKindBool
+	ValueKindFloat64
+	ValueKindInt64
+	ValueKindString
+	ValueKindBytes
+	ValueKindSlice
+	ValueKindMap
+)
+
+var valueKindStrings = []string{
+	"Empty",
+	"Bool",
+	"Float64",
+	"Int64",
+	"String",
+	"Bytes",
+	"Slice",
+	"Map",
+}
+
+func (k ValueKind) String() string {
+	if k >= 0 && int(k) < len(valueKindStrings) {
+		return valueKindStrings[k]
+	}
+	return ""
+}
+
+// StringValue returns a new [Value] for a string.
+func StringValue(v string) Value {
+	return Value{
+		num: uint64(len(v)),
+		any: stringptr(unsafe.StringData(v)),
+	}
+}
+
+// IntValue returns a [Value] for an int.
+func IntValue(v int) Value { return Int64Value(int64(v)) }
+
+// Int64Value returns a [Value] for an int64.
+func Int64Value(v int64) Value {
+	return Value{num: uint64(v), any: ValueKindInt64}
+}
+
+// Float64Value returns a [Value] for a float64.
+func Float64Value(v float64) Value {
+	return Value{num: math.Float64bits(v), any: ValueKindFloat64}
+}
+
+// BoolValue returns a [Value] for a bool.
+func BoolValue(v bool) Value { //nolint:revive // Not a control flag.
+	var n uint64
+	if v {
+		n = 1
+	}
+	return Value{num: n, any: ValueKindBool}
+}
+
+// BytesValue returns a [Value] for a byte slice. The passed slice must not be
+// changed after it is passed.
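+//
+// The bytes are aliased rather than copied: only the slice's data pointer and
+// length are stored (in the any and num fields above), so, for example,
+// BytesValue([]byte{1, 2}).AsBytes() would observe any later mutation of the
+// original slice.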
+func BytesValue(v []byte) Value { + return Value{ + num: uint64(len(v)), + any: bytesptr(unsafe.SliceData(v)), + } +} + +// SliceValue returns a [Value] for a slice of [Value]. The passed slice must +// not be changed after it is passed. +func SliceValue(vs ...Value) Value { + return Value{ + num: uint64(len(vs)), + any: sliceptr(unsafe.SliceData(vs)), + } +} + +// MapValue returns a new [Value] for a slice of key-value pairs. The passed +// slice must not be changed after it is passed. +func MapValue(kvs ...Attr) Value { + return Value{ + num: uint64(len(kvs)), + any: mapptr(unsafe.SliceData(kvs)), + } +} + +// AsString returns the value held by v as a string. +func (v Value) AsString() string { + if sp, ok := v.any.(stringptr); ok { + return unsafe.String(sp, v.num) + } + // TODO: error handle + return "" +} + +// asString returns the value held by v as a string. It will panic if the Value +// is not KindString. +func (v Value) asString() string { + return unsafe.String(v.any.(stringptr), v.num) +} + +// AsInt64 returns the value held by v as an int64. +func (v Value) AsInt64() int64 { + if v.Kind() != ValueKindInt64 { + // TODO: error handle + return 0 + } + return v.asInt64() +} + +// asInt64 returns the value held by v as an int64. If v is not of KindInt64, +// this will return garbage. +func (v Value) asInt64() int64 { + // Assumes v.num was a valid int64 (overflow not checked). + return int64(v.num) // nolint: gosec +} + +// AsBool returns the value held by v as a bool. +func (v Value) AsBool() bool { + if v.Kind() != ValueKindBool { + // TODO: error handle + return false + } + return v.asBool() +} + +// asBool returns the value held by v as a bool. If v is not of KindBool, this +// will return garbage. +func (v Value) asBool() bool { return v.num == 1 } + +// AsFloat64 returns the value held by v as a float64. +func (v Value) AsFloat64() float64 { + if v.Kind() != ValueKindFloat64 { + // TODO: error handle + return 0 + } + return v.asFloat64() +} + +// asFloat64 returns the value held by v as a float64. If v is not of +// KindFloat64, this will return garbage. +func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) } + +// AsBytes returns the value held by v as a []byte. +func (v Value) AsBytes() []byte { + if sp, ok := v.any.(bytesptr); ok { + return unsafe.Slice((*byte)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asBytes returns the value held by v as a []byte. It will panic if the Value +// is not KindBytes. +func (v Value) asBytes() []byte { + return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num) +} + +// AsSlice returns the value held by v as a []Value. +func (v Value) AsSlice() []Value { + if sp, ok := v.any.(sliceptr); ok { + return unsafe.Slice((*Value)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asSlice returns the value held by v as a []Value. It will panic if the Value +// is not KindSlice. +func (v Value) asSlice() []Value { + return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num) +} + +// AsMap returns the value held by v as a []Attr. +func (v Value) AsMap() []Attr { + if sp, ok := v.any.(mapptr); ok { + return unsafe.Slice((*Attr)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asMap returns the value held by v as a []Attr. It will panic if the +// Value is not KindMap. +func (v Value) asMap() []Attr { + return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num) +} + +// Kind returns the Kind of v. 
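+//
+// The kind is derived from the dynamic type stored in the any field: a
+// ValueKind tag for Bool, Int64, and Float64, or one of the pointer wrapper
+// types for String, Bytes, Slice, and Map. For example (illustrative):
+//
+//	StringValue("x").Kind() // ValueKindString
+//	Value{}.Kind()          // ValueKindEmpty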
+func (v Value) Kind() ValueKind {
+	switch x := v.any.(type) {
+	case ValueKind:
+		return x
+	case stringptr:
+		return ValueKindString
+	case bytesptr:
+		return ValueKindBytes
+	case sliceptr:
+		return ValueKindSlice
+	case mapptr:
+		return ValueKindMap
+	default:
+		return ValueKindEmpty
+	}
+}
+
+// Empty returns if v does not hold any value.
+func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty }
+
+// Equal returns if v is equal to w.
+func (v Value) Equal(w Value) bool {
+	k1 := v.Kind()
+	k2 := w.Kind()
+	if k1 != k2 {
+		return false
+	}
+	switch k1 {
+	case ValueKindInt64, ValueKindBool:
+		return v.num == w.num
+	case ValueKindString:
+		return v.asString() == w.asString()
+	case ValueKindFloat64:
+		return v.asFloat64() == w.asFloat64()
+	case ValueKindSlice:
+		return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal)
+	case ValueKindMap:
+		sv := sortMap(v.asMap())
+		sw := sortMap(w.asMap())
+		return slices.EqualFunc(sv, sw, Attr.Equal)
+	case ValueKindBytes:
+		return bytes.Equal(v.asBytes(), w.asBytes())
+	case ValueKindEmpty:
+		return true
+	default:
+		// TODO: error handle
+		return false
+	}
+}
+
+func sortMap(m []Attr) []Attr {
+	sm := make([]Attr, len(m))
+	copy(sm, m)
+	slices.SortFunc(sm, func(a, b Attr) int {
+		return cmp.Compare(a.Key, b.Key)
+	})
+
+	return sm
+}
+
+// String returns Value's value as a string, formatted like [fmt.Sprint].
+//
+// The returned string is meant for debugging;
+// the string representation is not stable.
+func (v Value) String() string {
+	switch v.Kind() {
+	case ValueKindString:
+		return v.asString()
+	case ValueKindInt64:
+		// Assumes v.num was a valid int64 (overflow not checked).
+		return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
+	case ValueKindFloat64:
+		return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
+	case ValueKindBool:
+		return strconv.FormatBool(v.asBool())
+	case ValueKindBytes:
+		return fmt.Sprint(v.asBytes())
+	case ValueKindMap:
+		return fmt.Sprint(v.asMap())
+	case ValueKindSlice:
+		return fmt.Sprint(v.asSlice())
+	case ValueKindEmpty:
+		return ""
+	default:
+		// Try to handle this as gracefully as possible.
+		//
+		// Don't panic here. The goal here is to have developers find this
+		// first if a slog.Kind is not handled. It is preferable to have
+		// users open an issue asking why their attributes have an
+		// "unhandled: " prefix than to say that their code is panicking.
+		return fmt.Sprintf("<unhandled: %s>", v.Kind())
+	}
+}
+
+// MarshalJSON encodes v into OTLP formatted JSON.
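+//
+// Each kind is encoded as a single-key OTLP AnyValue object; for example
+// (illustrative):
+//
+//	StringValue("x") // {"stringValue":"x"}
+//	Int64Value(1)    // {"intValue":"1"}
+//	BoolValue(true)  // {"boolValue":true}
+//
+// Note that integers are rendered as JSON strings, matching the OTLP JSON
+// mapping.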
+func (v *Value) MarshalJSON() ([]byte, error) { + switch v.Kind() { + case ValueKindString: + return json.Marshal(struct { + Value string `json:"stringValue"` + }{v.asString()}) + case ValueKindInt64: + return json.Marshal(struct { + Value string `json:"intValue"` + }{strconv.FormatInt(int64(v.num), 10)}) + case ValueKindFloat64: + return json.Marshal(struct { + Value float64 `json:"doubleValue"` + }{v.asFloat64()}) + case ValueKindBool: + return json.Marshal(struct { + Value bool `json:"boolValue"` + }{v.asBool()}) + case ValueKindBytes: + return json.Marshal(struct { + Value []byte `json:"bytesValue"` + }{v.asBytes()}) + case ValueKindMap: + return json.Marshal(struct { + Value struct { + Values []Attr `json:"values"` + } `json:"kvlistValue"` + }{struct { + Values []Attr `json:"values"` + }{v.asMap()}}) + case ValueKindSlice: + return json.Marshal(struct { + Value struct { + Values []Value `json:"values"` + } `json:"arrayValue"` + }{struct { + Values []Value `json:"values"` + }{v.asSlice()}}) + case ValueKindEmpty: + return nil, nil + default: + return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String()) + } +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v. +func (v *Value) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Value type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Value key: %#v", keyIface) + } + + switch key { + case "stringValue", "string_value": + var val string + err = decoder.Decode(&val) + *v = StringValue(val) + case "boolValue", "bool_value": + var val bool + err = decoder.Decode(&val) + *v = BoolValue(val) + case "intValue", "int_value": + var val protoInt64 + err = decoder.Decode(&val) + *v = Int64Value(val.Int64()) + case "doubleValue", "double_value": + var val float64 + err = decoder.Decode(&val) + *v = Float64Value(val) + case "bytesValue", "bytes_value": + var val64 string + if err := decoder.Decode(&val64); err != nil { + return err + } + var val []byte + val, err = base64.StdEncoding.DecodeString(val64) + *v = BytesValue(val) + case "arrayValue", "array_value": + var val struct{ Values []Value } + err = decoder.Decode(&val) + *v = SliceValue(val.Values...) + case "kvlistValue", "kvlist_value": + var val struct{ Values []Attr } + err = decoder.Decode(&val) + *v = MapValue(val.Values...) + default: + // Skip unknown. + continue + } + // Use first valid. Ignore the rest. + return err + } + + // Only unknown fields. Return nil without unmarshaling any value. + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/limit.go b/vendor/go.opentelemetry.io/auto/sdk/limit.go new file mode 100644 index 00000000000..86babf1a885 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/limit.go @@ -0,0 +1,94 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "log/slog" + "os" + "strconv" +) + +// maxSpan are the span limits resolved during startup. +var maxSpan = newSpanLimits() + +type spanLimits struct { + // Attrs is the number of allowed attributes for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. 
Otherwise, the
+	// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if
+	// that is not set, is used.
+	Attrs int
+	// AttrValueLen is the maximum attribute value length allowed for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the
+	// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1
+	// if that is not set, is used.
+	AttrValueLen int
+	// Events is the number of allowed events for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set.
+	Events int
+	// EventAttrs is the number of allowed attributes for a span event.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set.
+	EventAttrs int
+	// Links is the number of allowed Links for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set.
+	Links int
+	// LinkAttrs is the number of allowed attributes for a span link.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set.
+	LinkAttrs int
+}
+
+func newSpanLimits() spanLimits {
+	return spanLimits{
+		Attrs: firstEnv(
+			128,
+			"OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT",
+			"OTEL_ATTRIBUTE_COUNT_LIMIT",
+		),
+		AttrValueLen: firstEnv(
+			-1, // Unlimited.
+			"OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+			"OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+		),
+		Events:     firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"),
+		EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"),
+		Links:      firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"),
+		LinkAttrs:  firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"),
+	}
+}
+
+// firstEnv returns the parsed integer value of the first matching environment
+// variable from keys. The defaultVal is returned if the value is not an
+// integer or no match is found.
+func firstEnv(defaultVal int, keys ...string) int {
+	for _, key := range keys {
+		strV := os.Getenv(key)
+		if strV == "" {
+			continue
+		}
+
+		v, err := strconv.Atoi(strV)
+		if err == nil {
+			return v
+		}
+		slog.Warn(
+			"invalid limit environment variable",
+			"error", err,
+			"key", key,
+			"value", strV,
+		)
+	}
+
+	return defaultVal
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/span.go b/vendor/go.opentelemetry.io/auto/sdk/span.go
new file mode 100644
index 00000000000..6ebea12a9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/span.go
@@ -0,0 +1,432 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sdk
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"runtime"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+	"unicode/utf8"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/noop"
+
+	"go.opentelemetry.io/auto/sdk/internal/telemetry"
+)
+
+type span struct {
+	noop.Span
+
+	spanContext trace.SpanContext
+	sampled     atomic.Bool
+
+	mu     sync.Mutex
+	traces *telemetry.Traces
+	span   *telemetry.Span
+}
+
+func (s *span) SpanContext() trace.SpanContext {
+	if s == nil {
+		return trace.SpanContext{}
+	}
+	// s.spanContext is immutable, do not acquire lock s.mu.
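+	// It is populated once in tracer.Start (via the pointer passed to the
+	// eBPF start probe) and only read afterwards.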
+ return s.spanContext +} + +func (s *span) IsRecording() bool { + if s == nil { + return false + } + + return s.sampled.Load() +} + +func (s *span) SetStatus(c codes.Code, msg string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + if s.span.Status == nil { + s.span.Status = new(telemetry.Status) + } + + s.span.Status.Message = msg + + switch c { + case codes.Unset: + s.span.Status.Code = telemetry.StatusCodeUnset + case codes.Error: + s.span.Status.Code = telemetry.StatusCodeError + case codes.Ok: + s.span.Status.Code = telemetry.StatusCodeOK + } +} + +func (s *span) SetAttributes(attrs ...attribute.KeyValue) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + limit := maxSpan.Attrs + if limit == 0 { + // No attributes allowed. + s.span.DroppedAttrs += uint32(len(attrs)) + return + } + + m := make(map[string]int) + for i, a := range s.span.Attrs { + m[a.Key] = i + } + + for _, a := range attrs { + val := convAttrValue(a.Value) + if val.Empty() { + s.span.DroppedAttrs++ + continue + } + + if idx, ok := m[string(a.Key)]; ok { + s.span.Attrs[idx] = telemetry.Attr{ + Key: string(a.Key), + Value: val, + } + } else if limit < 0 || len(s.span.Attrs) < limit { + s.span.Attrs = append(s.span.Attrs, telemetry.Attr{ + Key: string(a.Key), + Value: val, + }) + m[string(a.Key)] = len(s.span.Attrs) - 1 + } else { + s.span.DroppedAttrs++ + } + } +} + +// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The +// number of dropped attributes is also returned. +func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { + if limit == 0 { + return nil, uint32(len(attrs)) + } + + if limit < 0 { + // Unlimited. + return convAttrs(attrs), 0 + } + + limit = min(len(attrs), limit) + return convAttrs(attrs[:limit]), uint32(len(attrs) - limit) +} + +func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { + if len(attrs) == 0 { + // Avoid allocations if not necessary. + return nil + } + + out := make([]telemetry.Attr, 0, len(attrs)) + for _, attr := range attrs { + key := string(attr.Key) + val := convAttrValue(attr.Value) + if val.Empty() { + continue + } + out = append(out, telemetry.Attr{Key: key, Value: val}) + } + return out +} + +func convAttrValue(value attribute.Value) telemetry.Value { + switch value.Type() { + case attribute.BOOL: + return telemetry.BoolValue(value.AsBool()) + case attribute.INT64: + return telemetry.Int64Value(value.AsInt64()) + case attribute.FLOAT64: + return telemetry.Float64Value(value.AsFloat64()) + case attribute.STRING: + v := truncate(maxSpan.AttrValueLen, value.AsString()) + return telemetry.StringValue(v) + case attribute.BOOLSLICE: + slice := value.AsBoolSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.BoolValue(v)) + } + return telemetry.SliceValue(out...) + case attribute.INT64SLICE: + slice := value.AsInt64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Int64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.FLOAT64SLICE: + slice := value.AsFloat64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Float64Value(v)) + } + return telemetry.SliceValue(out...) 
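+	// Note: string slice elements below are truncated to
+	// maxSpan.AttrValueLen, the same limit applied to single string values.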
+	case attribute.STRINGSLICE:
+		slice := value.AsStringSlice()
+		out := make([]telemetry.Value, 0, len(slice))
+		for _, v := range slice {
+			v = truncate(maxSpan.AttrValueLen, v)
+			out = append(out, telemetry.StringValue(v))
+		}
+		return telemetry.SliceValue(out...)
+	}
+	return telemetry.Value{}
+}
+
+// truncate returns a truncated version of s such that it contains at most
+// the limit number of characters. Truncation is applied by returning the limit
+// number of valid characters contained in s.
+//
+// If limit is negative, it returns the original string.
+//
+// UTF-8 is supported. When truncating, all invalid characters are dropped
+// before applying truncation.
+//
+// If s already contains no more than the limit number of bytes, it is
+// returned unchanged. No invalid characters are removed.
+func truncate(limit int, s string) string {
+	// This prioritizes performance in the following order based on the most
+	// common expected use-cases.
+	//
+	//  - Short values less than the default limit (128).
+	//  - Strings with valid encodings that exceed the limit.
+	//  - No limit.
+	//  - Strings with invalid encodings that exceed the limit.
+	if limit < 0 || len(s) <= limit {
+		return s
+	}
+
+	// Optimistically, assume all valid UTF-8.
+	var b strings.Builder
+	count := 0
+	for i, c := range s {
+		if c != utf8.RuneError {
+			count++
+			if count > limit {
+				return s[:i]
+			}
+			continue
+		}
+
+		_, size := utf8.DecodeRuneInString(s[i:])
+		if size == 1 {
+			// Invalid encoding.
+			b.Grow(len(s) - 1)
+			_, _ = b.WriteString(s[:i])
+			s = s[i:]
+			break
+		}
+	}
+
+	// Fast-path, no invalid input.
+	if b.Cap() == 0 {
+		return s
+	}
+
+	// Truncate while validating UTF-8.
+	for i := 0; i < len(s) && count < limit; {
+		c := s[i]
+		if c < utf8.RuneSelf {
+			// Optimization for single byte runes (common case).
+			_ = b.WriteByte(c)
+			i++
+			count++
+			continue
+		}
+
+		_, size := utf8.DecodeRuneInString(s[i:])
+		if size == 1 {
+			// We checked for all 1-byte runes above, this is a RuneError.
+			i++
+			continue
+		}
+
+		_, _ = b.WriteString(s[i : i+size])
+		i += size
+		count++
+	}
+
+	return b.String()
+}
+
+func (s *span) End(opts ...trace.SpanEndOption) {
+	if s == nil || !s.sampled.Swap(false) {
+		return
+	}
+
+	// s.end exists so the lock (s.mu) is not held while s.ended is called.
+	s.ended(s.end(opts))
+}
+
+func (s *span) end(opts []trace.SpanEndOption) []byte {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	cfg := trace.NewSpanEndConfig(opts...)
+	if t := cfg.Timestamp(); !t.IsZero() {
+		s.span.EndTime = cfg.Timestamp()
+	} else {
+		s.span.EndTime = time.Now()
+	}
+
+	b, _ := json.Marshal(s.traces) // TODO: do not ignore this error.
+	return b
+}
+
+// Expected to be implemented in eBPF.
+//
+//go:noinline
+func (*span) ended(buf []byte) { ended(buf) }
+
+// ended is used for testing.
+var ended = func([]byte) {}
+
+func (s *span) RecordError(err error, opts ...trace.EventOption) {
+	if s == nil || err == nil || !s.sampled.Load() {
+		return
+	}
+
+	cfg := trace.NewEventConfig(opts...)
+
+	attrs := cfg.Attributes()
+	attrs = append(attrs,
+		semconv.ExceptionType(typeStr(err)),
+		semconv.ExceptionMessage(err.Error()),
+	)
+	if cfg.StackTrace() {
+		buf := make([]byte, 2048)
+		n := runtime.Stack(buf, false)
+		attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n])))
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs)
+}
+
+func typeStr(i any) string {
+	t := reflect.TypeOf(i)
+	if t.PkgPath() == "" && t.Name() == "" {
+		// Likely a builtin type.
+ return t.String() + } + return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) +} + +func (s *span) AddEvent(name string, opts ...trace.EventOption) { + if s == nil || !s.sampled.Load() { + return + } + + cfg := trace.NewEventConfig(opts...) + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(name, cfg.Timestamp(), cfg.Attributes()) +} + +// addEvent adds an event with name and attrs at tStamp to the span. The span +// lock (s.mu) needs to be held by the caller. +func (s *span) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) { + limit := maxSpan.Events + + if limit == 0 { + s.span.DroppedEvents++ + return + } + + if limit > 0 && len(s.span.Events) == limit { + // Drop head while avoiding allocation of more capacity. + copy(s.span.Events[:limit-1], s.span.Events[1:]) + s.span.Events = s.span.Events[:limit-1] + s.span.DroppedEvents++ + } + + e := &telemetry.SpanEvent{Time: tStamp, Name: name} + e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs) + + s.span.Events = append(s.span.Events, e) +} + +func (s *span) AddLink(link trace.Link) { + if s == nil || !s.sampled.Load() { + return + } + + l := maxSpan.Links + + s.mu.Lock() + defer s.mu.Unlock() + + if l == 0 { + s.span.DroppedLinks++ + return + } + + if l > 0 && len(s.span.Links) == l { + // Drop head while avoiding allocation of more capacity. + copy(s.span.Links[:l-1], s.span.Links[1:]) + s.span.Links = s.span.Links[:l-1] + s.span.DroppedLinks++ + } + + s.span.Links = append(s.span.Links, convLink(link)) +} + +func convLinks(links []trace.Link) []*telemetry.SpanLink { + out := make([]*telemetry.SpanLink, 0, len(links)) + for _, link := range links { + out = append(out, convLink(link)) + } + return out +} + +func convLink(link trace.Link) *telemetry.SpanLink { + l := &telemetry.SpanLink{ + TraceID: telemetry.TraceID(link.SpanContext.TraceID()), + SpanID: telemetry.SpanID(link.SpanContext.SpanID()), + TraceState: link.SpanContext.TraceState().String(), + Flags: uint32(link.SpanContext.TraceFlags()), + } + l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes) + + return l +} + +func (s *span) SetName(name string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.span.Name = name +} + +func (*span) TracerProvider() trace.TracerProvider { return TracerProvider() } diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer.go b/vendor/go.opentelemetry.io/auto/sdk/tracer.go new file mode 100644 index 00000000000..cbcfabde3b1 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer.go @@ -0,0 +1,124 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" + + "go.opentelemetry.io/auto/sdk/internal/telemetry" +) + +type tracer struct { + noop.Tracer + + name, schemaURL, version string +} + +var _ trace.Tracer = tracer{} + +func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + var psc trace.SpanContext + sampled := true + span := new(span) + + // Ask eBPF for sampling decision and span context info. + t.start(ctx, span, &psc, &sampled, &span.spanContext) + + span.sampled.Store(sampled) + + ctx = trace.ContextWithSpan(ctx, span) + + if sampled { + // Only build traces if sampled. + cfg := trace.NewSpanStartConfig(opts...) 
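+		// Note: the OTLP payload is assembled once here; span.End() later
+		// marshals it to JSON and hands the bytes to the eBPF probe via
+		// ended().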
+ span.traces, span.span = t.traces(name, cfg, span.spanContext, psc) + } + + return ctx, span +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (t *tracer) start( + ctx context.Context, + spanPtr *span, + psc *trace.SpanContext, + sampled *bool, + sc *trace.SpanContext, +) { + start(ctx, spanPtr, psc, sampled, sc) +} + +// start is used for testing. +var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {} + +func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) { + span := &telemetry.Span{ + TraceID: telemetry.TraceID(sc.TraceID()), + SpanID: telemetry.SpanID(sc.SpanID()), + Flags: uint32(sc.TraceFlags()), + TraceState: sc.TraceState().String(), + ParentSpanID: telemetry.SpanID(psc.SpanID()), + Name: name, + Kind: spanKind(cfg.SpanKind()), + } + + span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes()) + + links := cfg.Links() + if limit := maxSpan.Links; limit == 0 { + span.DroppedLinks = uint32(len(links)) + } else { + if limit > 0 { + n := max(len(links)-limit, 0) + span.DroppedLinks = uint32(n) + links = links[n:] + } + span.Links = convLinks(links) + } + + if t := cfg.Timestamp(); !t.IsZero() { + span.StartTime = cfg.Timestamp() + } else { + span.StartTime = time.Now() + } + + return &telemetry.Traces{ + ResourceSpans: []*telemetry.ResourceSpans{ + { + ScopeSpans: []*telemetry.ScopeSpans{ + { + Scope: &telemetry.Scope{ + Name: t.name, + Version: t.version, + }, + Spans: []*telemetry.Span{span}, + SchemaURL: t.schemaURL, + }, + }, + }, + }, + }, span +} + +func spanKind(kind trace.SpanKind) telemetry.SpanKind { + switch kind { + case trace.SpanKindInternal: + return telemetry.SpanKindInternal + case trace.SpanKindServer: + return telemetry.SpanKindServer + case trace.SpanKindClient: + return telemetry.SpanKindClient + case trace.SpanKindProducer: + return telemetry.SpanKindProducer + case trace.SpanKindConsumer: + return telemetry.SpanKindConsumer + } + return telemetry.SpanKind(0) // undefined. +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go new file mode 100644 index 00000000000..dbc477a59ad --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" +) + +// TracerProvider returns an auto-instrumentable [trace.TracerProvider]. +// +// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument +// the process using the returned TracerProvider, all of the telemetry it +// produces will be processed and handled by that Instrumentation. By default, +// if no Instrumentation instruments the TracerProvider it will not generate +// any trace telemetry. +func TracerProvider() trace.TracerProvider { return tracerProviderInstance } + +var tracerProviderInstance = new(tracerProvider) + +type tracerProvider struct{ noop.TracerProvider } + +var _ trace.TracerProvider = tracerProvider{} + +func (p tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + cfg := trace.NewTracerConfig(opts...) 
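+	// The resolved name, version, and schema URL become the scope fields of
+	// the ScopeSpans that every span from this tracer is reported under (see
+	// tracer.traces).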
+ return tracer{ + name: name, + version: cfg.InstrumentationVersion(), + schemaURL: cfg.SchemaURL(), + } +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index 6aae83bfd20..b25641c55d3 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -18,7 +18,7 @@ var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} // Get is a convenient replacement for http.Get that adds a span around the request. func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, nil) if err != nil { return nil, err } @@ -27,7 +27,7 @@ func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) // Head is a convenient replacement for http.Head that adds a span around the request. func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, nil) if err != nil { return nil, err } @@ -36,7 +36,7 @@ func Head(ctx context.Context, targetURL string) (resp *http.Response, err error // Post is a convenient replacement for http.Post that adds a span around the request. func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, targetURL, body) if err != nil { return nil, err } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index e555a475f13..3ea05d01995 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -12,6 +12,7 @@ import ( "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" ) @@ -21,15 +22,16 @@ type middleware struct { operation string server string - tracer trace.Tracer - propagators propagation.TextMapPropagator - spanStartOptions []trace.SpanStartOption - readEvent bool - writeEvent bool - filters []Filter - spanNameFormatter func(string, *http.Request) string - publicEndpoint bool - publicEndpointFn func(*http.Request) bool + tracer trace.Tracer + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + readEvent bool + writeEvent bool + filters []Filter + spanNameFormatter func(string, *http.Request) string + publicEndpoint bool + publicEndpointFn func(*http.Request) bool + metricAttributesFn func(*http.Request) []attribute.KeyValue semconv semconv.HTTPServer } @@ -79,6 +81,7 @@ func (h *middleware) configure(c *config) { h.publicEndpointFn = c.PublicEndpointFn h.server = c.ServerName h.semconv = semconv.NewHTTPServer(c.Meter) + h.metricAttributesFn = c.MetricAttributesFn } // serveHTTP 
sets up tracing and calls the given next http.Handler with the span @@ -189,14 +192,16 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) + metricAttributes := semconv.MetricAttributes{ + Req: r, + StatusCode: statusCode, + AdditionalAttributes: append(labeler.Get(), h.metricAttributesFromRequest(r)...), + } + h.semconv.RecordMetrics(ctx, semconv.ServerMetricData{ - ServerName: h.server, - ResponseSize: bytesWritten, - MetricAttributes: semconv.MetricAttributes{ - Req: r, - StatusCode: statusCode, - AdditionalAttributes: labeler.Get(), - }, + ServerName: h.server, + ResponseSize: bytesWritten, + MetricAttributes: metricAttributes, MetricData: semconv.MetricData{ RequestSize: bw.BytesRead(), ElapsedTime: elapsedTime, @@ -204,6 +209,14 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http }) } +func (h *middleware) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { + var attributeForRequest []attribute.KeyValue + if h.metricAttributesFn != nil { + attributeForRequest = h.metricAttributesFn(r) + } + return attributeForRequest +} + // WithRouteTag annotates spans and metrics with the provided route name // with HTTP route attribute. func WithRouteTag(route string, h http.Handler) http.Handler { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go index a945f556616..866aa21dced 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go @@ -1,3 +1,6 @@ +// Code created by gotmpl. DO NOT MODIFY. 
+// source: internal/shared/request/body_wrapper.go.tmpl
+
 // Copyright The OpenTelemetry Authors
 // SPDX-License-Identifier: Apache-2.0
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/gen.go
new file mode 100644
index 00000000000..9e00dd2fcef
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/gen.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
+
+// Generate request package:
+//go:generate gotmpl --body=../../../../../../internal/shared/request/body_wrapper.go.tmpl "--data={}" --out=body_wrapper.go
+//go:generate gotmpl --body=../../../../../../internal/shared/request/body_wrapper_test.go.tmpl "--data={}" --out=body_wrapper_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/request/resp_writer_wrapper.go.tmpl "--data={}" --out=resp_writer_wrapper.go
+//go:generate gotmpl --body=../../../../../../internal/shared/request/resp_writer_wrapper_test.go.tmpl "--data={}" --out=resp_writer_wrapper_test.go
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go
index fbc344cbdda..73184e7d005 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go
@@ -1,3 +1,6 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/request/resp_writer_wrapper.go.tmpl
+
 // Copyright The OpenTelemetry Authors
 // SPDX-License-Identifier: Apache-2.0
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
index fb893b25042..4693a019495 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
@@ -1,3 +1,6 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/semconv/env.go.tmpl
+
 // Copyright The OpenTelemetry Authors
 // SPDX-License-Identifier: Apache-2.0
@@ -9,12 +12,17 @@ import (
 	"net/http"
 	"os"
 	"strings"
+	"sync"
 
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/codes"
 	"go.opentelemetry.io/otel/metric"
 )
 
+// OTelSemConvStabilityOptIn is an environment variable that can be set to
+// "old" or "http/dup" to opt into the new HTTP semantic conventions.
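+//
+// For example (illustrative):
+//
+//	export OTEL_SEMCONV_STABILITY_OPT_IN=http/dup
+//
+// opts into emitting both the old and the current semantic conventions.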
+const OTelSemConvStabilityOptIn = "OTEL_SEMCONV_STABILITY_OPT_IN" + type ResponseTelemetry struct { StatusCode int ReadBytes int64 @@ -30,6 +38,11 @@ type HTTPServer struct { requestBytesCounter metric.Int64Counter responseBytesCounter metric.Int64Counter serverLatencyMeasure metric.Float64Histogram + + // New metrics + requestBodySizeHistogram metric.Int64Histogram + responseBodySizeHistogram metric.Int64Histogram + requestDurationHistogram metric.Float64Histogram } // RequestTraceAttrs returns trace attributes for an HTTP request received by a @@ -50,9 +63,18 @@ type HTTPServer struct { // The req Host will be used to determine the server instead. func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { if s.duplicate { - return append(oldHTTPServer{}.RequestTraceAttrs(server, req), newHTTPServer{}.RequestTraceAttrs(server, req)...) + return append(OldHTTPServer{}.RequestTraceAttrs(server, req), CurrentHTTPServer{}.RequestTraceAttrs(server, req)...) + } + return OldHTTPServer{}.RequestTraceAttrs(server, req) +} + +func (s HTTPServer) NetworkTransportAttr(network string) []attribute.KeyValue { + if s.duplicate { + return append([]attribute.KeyValue{OldHTTPServer{}.NetworkTransportAttr(network)}, CurrentHTTPServer{}.NetworkTransportAttr(network)) + } + return []attribute.KeyValue{ + OldHTTPServer{}.NetworkTransportAttr(network), } - return oldHTTPServer{}.RequestTraceAttrs(server, req) } // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. @@ -60,14 +82,14 @@ func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attrib // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { if s.duplicate { - return append(oldHTTPServer{}.ResponseTraceAttrs(resp), newHTTPServer{}.ResponseTraceAttrs(resp)...) + return append(OldHTTPServer{}.ResponseTraceAttrs(resp), CurrentHTTPServer{}.ResponseTraceAttrs(resp)...) } - return oldHTTPServer{}.ResponseTraceAttrs(resp) + return OldHTTPServer{}.ResponseTraceAttrs(resp) } // Route returns the attribute for the route. func (s HTTPServer) Route(route string) attribute.KeyValue { - return oldHTTPServer{}.Route(route) + return OldHTTPServer{}.Route(route) } // Status returns a span status code and message for an HTTP status code @@ -102,29 +124,56 @@ type MetricData struct { ElapsedTime float64 } -func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { - if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil { - // This will happen if an HTTPServer{} is used insted of NewHTTPServer. - return +var ( + metricAddOptionPool = &sync.Pool{ + New: func() interface{} { + return &[]metric.AddOption{} + }, + } + + metricRecordOptionPool = &sync.Pool{ + New: func() interface{} { + return &[]metric.RecordOption{} + }, } +) - attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) - o := metric.WithAttributeSet(attribute.NewSet(attributes...)) - addOpts := []metric.AddOption{o} - s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...) - s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...) 
- s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) +func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { + if s.requestBytesCounter != nil && s.responseBytesCounter != nil && s.serverLatencyMeasure != nil { + attributes := OldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + addOpts := metricAddOptionPool.Get().(*[]metric.AddOption) + *addOpts = append(*addOpts, o) + s.requestBytesCounter.Add(ctx, md.RequestSize, *addOpts...) + s.responseBytesCounter.Add(ctx, md.ResponseSize, *addOpts...) + s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) + *addOpts = (*addOpts)[:0] + metricAddOptionPool.Put(addOpts) + } - // TODO: Duplicate Metrics + if s.duplicate && s.requestDurationHistogram != nil && s.requestBodySizeHistogram != nil && s.responseBodySizeHistogram != nil { + attributes := CurrentHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption) + *recordOpts = append(*recordOpts, o) + s.requestBodySizeHistogram.Record(ctx, md.RequestSize, *recordOpts...) + s.responseBodySizeHistogram.Record(ctx, md.ResponseSize, *recordOpts...) + s.requestDurationHistogram.Record(ctx, md.ElapsedTime, o) + *recordOpts = (*recordOpts)[:0] + metricRecordOptionPool.Put(recordOpts) + } } func NewHTTPServer(meter metric.Meter) HTTPServer { - env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn)) duplicate := env == "http/dup" server := HTTPServer{ duplicate: duplicate, } - server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter) + server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter) + if duplicate { + server.requestBodySizeHistogram, server.responseBodySizeHistogram, server.requestDurationHistogram = CurrentHTTPServer{}.createMeasures(meter) + } return server } @@ -135,32 +184,41 @@ type HTTPClient struct { requestBytesCounter metric.Int64Counter responseBytesCounter metric.Int64Counter latencyMeasure metric.Float64Histogram + + // new metrics + requestBodySize metric.Int64Histogram + requestDuration metric.Float64Histogram } func NewHTTPClient(meter metric.Meter) HTTPClient { - env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn)) + duplicate := env == "http/dup" client := HTTPClient{ - duplicate: env == "http/dup", + duplicate: duplicate, } - client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = oldHTTPClient{}.createMeasures(meter) + client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = OldHTTPClient{}.createMeasures(meter) + if duplicate { + client.requestBodySize, client.requestDuration = CurrentHTTPClient{}.createMeasures(meter) + } + return client } // RequestTraceAttrs returns attributes for an HTTP request made by a client. func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { if c.duplicate { - return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...) + return append(OldHTTPClient{}.RequestTraceAttrs(req), CurrentHTTPClient{}.RequestTraceAttrs(req)...) 
} - return oldHTTPClient{}.RequestTraceAttrs(req) + return OldHTTPClient{}.RequestTraceAttrs(req) } // ResponseTraceAttrs returns metric attributes for an HTTP request made by a client. func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { if c.duplicate { - return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...) + return append(OldHTTPClient{}.ResponseTraceAttrs(resp), CurrentHTTPClient{}.ResponseTraceAttrs(resp)...) } - return oldHTTPClient{}.ResponseTraceAttrs(resp) + return OldHTTPClient{}.ResponseTraceAttrs(resp) } func (c HTTPClient) Status(code int) (codes.Code, string) { @@ -175,7 +233,7 @@ func (c HTTPClient) Status(code int) (codes.Code, string) { func (c HTTPClient) ErrorType(err error) attribute.KeyValue { if c.duplicate { - return newHTTPClient{}.ErrorType(err) + return CurrentHTTPClient{}.ErrorType(err) } return attribute.KeyValue{} @@ -194,34 +252,56 @@ func (o MetricOpts) AddOptions() metric.AddOption { return o.addOptions } -func (c HTTPClient) MetricOptions(ma MetricAttributes) MetricOpts { - attributes := oldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) - // TODO: Duplicate Metrics +func (c HTTPClient) MetricOptions(ma MetricAttributes) map[string]MetricOpts { + opts := map[string]MetricOpts{} + + attributes := OldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) set := metric.WithAttributeSet(attribute.NewSet(attributes...)) - return MetricOpts{ + opts["old"] = MetricOpts{ measurement: set, addOptions: set, } + + if c.duplicate { + attributes := CurrentHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) + set := metric.WithAttributeSet(attribute.NewSet(attributes...)) + opts["new"] = MetricOpts{ + measurement: set, + addOptions: set, + } + } + + return opts } -func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts MetricOpts) { +func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts map[string]MetricOpts) { if s.requestBytesCounter == nil || s.latencyMeasure == nil { - // This will happen if an HTTPClient{} is used insted of NewHTTPClient(). + // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). return } - s.requestBytesCounter.Add(ctx, md.RequestSize, opts.AddOptions()) - s.latencyMeasure.Record(ctx, md.ElapsedTime, opts.MeasurementOption()) + s.requestBytesCounter.Add(ctx, md.RequestSize, opts["old"].AddOptions()) + s.latencyMeasure.Record(ctx, md.ElapsedTime, opts["old"].MeasurementOption()) - // TODO: Duplicate Metrics + if s.duplicate { + s.requestBodySize.Record(ctx, md.RequestSize, opts["new"].MeasurementOption()) + s.requestDuration.Record(ctx, md.ElapsedTime, opts["new"].MeasurementOption()) + } } -func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts metric.AddOption) { +func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts map[string]MetricOpts) { if s.responseBytesCounter == nil { - // This will happen if an HTTPClient{} is used insted of NewHTTPClient(). + // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). 
return } - s.responseBytesCounter.Add(ctx, responseData, opts) - // TODO: Duplicate Metrics + s.responseBytesCounter.Add(ctx, responseData, opts["old"].AddOptions()) +} + +func (s HTTPClient) TraceAttributes(host string) []attribute.KeyValue { + if s.duplicate { + return append(OldHTTPClient{}.TraceAttributes(host), CurrentHTTPClient{}.TraceAttributes(host)...) + } + + return OldHTTPClient{}.TraceAttributes(host) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go new file mode 100644 index 00000000000..f2cf8a152d3 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +// Generate semconv package: +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/bench_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=bench_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=env.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=env_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconv.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconv_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/v1.20.0.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=v1.20.0.go diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go index 745b8c67bc4..8b85eff90a7 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go @@ -1,3 +1,6 @@ +// Code created by gotmpl. DO NOT MODIFY. 
+// source: internal/shared/semconv/httpconv.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -7,14 +10,17 @@ import ( "fmt" "net/http" "reflect" + "slices" "strconv" "strings" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) -type newHTTPServer struct{} +type CurrentHTTPServer struct{} // TraceRequest returns trace attributes for an HTTP request received by a // server. @@ -32,18 +38,18 @@ type newHTTPServer struct{} // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. -func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { +func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { count := 3 // ServerAddress, Method, Scheme var host string var p int if server == "" { - host, p = splitHostPort(req.Host) + host, p = SplitHostPort(req.Host) } else { // Prioritize the primary server name. - host, p = splitHostPort(server) + host, p = SplitHostPort(server) if p < 0 { - _, p = splitHostPort(req.Host) + _, p = SplitHostPort(req.Host) } } @@ -59,7 +65,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att scheme := n.scheme(req.TLS != nil) - if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a // file-path that would be interpreted with a sock family. count++ @@ -104,7 +110,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att attrs = append(attrs, methodOriginal) } - if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a // file-path that would be interpreted with a sock family. attrs = append(attrs, semconvNew.NetworkPeerAddress(peer)) @@ -135,7 +141,20 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att return attrs } -func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { +func (o CurrentHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue { + switch network { + case "tcp", "tcp4", "tcp6": + return semconvNew.NetworkTransportTCP + case "udp", "udp4", "udp6": + return semconvNew.NetworkTransportUDP + case "unix", "unixgram", "unixpacket": + return semconvNew.NetworkTransportUnix + default: + return semconvNew.NetworkTransportPipe + } +} + +func (n CurrentHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { if method == "" { return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} } @@ -150,7 +169,7 @@ func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyV return semconvNew.HTTPRequestMethodGet, orig } -func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive +func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive if https { return semconvNew.URLScheme("https") } @@ -160,7 +179,7 @@ func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive // TraceResponse returns trace attributes for telemetry from an HTTP response. 
// // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. -func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { +func (n CurrentHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { var count int if resp.ReadBytes > 0 { @@ -195,14 +214,94 @@ func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke } // Route returns the attribute for the route. -func (n newHTTPServer) Route(route string) attribute.KeyValue { +func (n CurrentHTTPServer) Route(route string) attribute.KeyValue { return semconvNew.HTTPRoute(route) } -type newHTTPClient struct{} +func (n CurrentHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Int64Histogram, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Histogram{}, noop.Int64Histogram{}, noop.Float64Histogram{} + } + + var err error + requestBodySizeHistogram, err := meter.Int64Histogram( + semconvNew.HTTPServerRequestBodySizeName, + metric.WithUnit(semconvNew.HTTPServerRequestBodySizeUnit), + metric.WithDescription(semconvNew.HTTPServerRequestBodySizeDescription), + ) + handleErr(err) + + responseBodySizeHistogram, err := meter.Int64Histogram( + semconvNew.HTTPServerResponseBodySizeName, + metric.WithUnit(semconvNew.HTTPServerResponseBodySizeUnit), + metric.WithDescription(semconvNew.HTTPServerResponseBodySizeDescription), + ) + handleErr(err) + requestDurationHistogram, err := meter.Float64Histogram( + semconvNew.HTTPServerRequestDurationName, + metric.WithUnit(semconvNew.HTTPServerRequestDurationUnit), + metric.WithDescription(semconvNew.HTTPServerRequestDurationDescription), + ) + handleErr(err) + + return requestBodySizeHistogram, responseBodySizeHistogram, requestDurationHistogram +} + +func (n CurrentHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + num := len(additionalAttributes) + 3 + var host string + var p int + if server == "" { + host, p = SplitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = SplitHostPort(server) + if p < 0 { + _, p = SplitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + num++ + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + num++ + } + if protoVersion != "" { + num++ + } + + if statusCode > 0 { + num++ + } + + attributes := slices.Grow(additionalAttributes, num) + attributes = append(attributes, + semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), + n.scheme(req.TLS != nil), + semconvNew.ServerAddress(host)) + + if hostPort > 0 { + attributes = append(attributes, semconvNew.ServerPort(hostPort)) + } + if protoName != "" { + attributes = append(attributes, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode)) + } + return attributes +} + +type CurrentHTTPClient struct{} // RequestTraceAttrs returns trace attributes for an HTTP request made by a client. 
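MetricAttributes above counts every attribute it may add before appending anything, then reserves the space once with slices.Grow. A distilled sketch of that pre-sizing pattern, with illustrative attribute keys rather than the instrumentation's real set:

```go
package main

import (
	"fmt"
	"slices"

	"go.opentelemetry.io/otel/attribute"
)

// withRequestAttrs appends up to three attributes to attrs, growing it once.
// The keys are illustrative stand-ins for the semconv helpers used above.
func withRequestAttrs(attrs []attribute.KeyValue, method, host string, statusCode int) []attribute.KeyValue {
	n := 2 // method and host are always added
	if statusCode > 0 {
		n++
	}
	attrs = slices.Grow(attrs, n) // at most one reallocation
	attrs = append(attrs,
		attribute.String("http.request.method", method),
		attribute.String("server.address", host))
	if statusCode > 0 {
		attrs = append(attrs, attribute.Int("http.response.status_code", statusCode))
	}
	return attrs
}

func main() {
	attrs := withRequestAttrs(nil, "GET", "example.com", 200)
	fmt.Println(len(attrs)) // 3
}
```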
-func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { +func (n CurrentHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { /* below attributes are returned: - http.request.method @@ -222,7 +321,7 @@ func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue var requestHost string var requestPort int for _, hostport := range []string{urlHost, req.Header.Get("Host")} { - requestHost, requestPort = splitHostPort(hostport) + requestHost, requestPort = SplitHostPort(hostport) if requestHost != "" || requestPort > 0 { break } @@ -284,7 +383,7 @@ func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue } // ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. -func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { +func (n CurrentHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { /* below attributes are returned: - http.response.status_code @@ -311,7 +410,7 @@ func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyVa return attrs } -func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { +func (n CurrentHTTPClient) ErrorType(err error) attribute.KeyValue { t := reflect.TypeOf(err) var value string if t.PkgPath() == "" && t.Name() == "" { @@ -328,7 +427,7 @@ func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { return semconvNew.ErrorTypeKey.String(value) } -func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { +func (n CurrentHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { if method == "" { return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} } @@ -343,6 +442,98 @@ func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyV return semconvNew.HTTPRequestMethodGet, orig } +func (n CurrentHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Histogram{}, noop.Float64Histogram{} + } + + var err error + requestBodySize, err := meter.Int64Histogram( + semconvNew.HTTPClientRequestBodySizeName, + metric.WithUnit(semconvNew.HTTPClientRequestBodySizeUnit), + metric.WithDescription(semconvNew.HTTPClientRequestBodySizeDescription), + ) + handleErr(err) + + requestDuration, err := meter.Float64Histogram( + semconvNew.HTTPClientRequestDurationName, + metric.WithUnit(semconvNew.HTTPClientRequestDurationUnit), + metric.WithDescription(semconvNew.HTTPClientRequestDurationDescription), + ) + handleErr(err) + + return requestBodySize, requestDuration +} + +func (n CurrentHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + num := len(additionalAttributes) + 2 + var h string + if req.URL != nil { + h = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{h, req.Header.Get("Host")} { + requestHost, requestPort = SplitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if port > 0 { + num++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + num++ + } + if protoVersion != "" { + num++ + } + + if statusCode > 0 { + num++ + } + + attributes := slices.Grow(additionalAttributes, num) + attributes = append(attributes, + 
semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), + semconvNew.ServerAddress(requestHost), + n.scheme(req.TLS != nil), + ) + + if port > 0 { + attributes = append(attributes, semconvNew.ServerPort(port)) + } + if protoName != "" { + attributes = append(attributes, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode)) + } + return attributes +} + +// Attributes for httptrace. +func (n CurrentHTTPClient) TraceAttributes(host string) []attribute.KeyValue { + return []attribute.KeyValue{ + semconvNew.ServerAddress(host), + } +} + +func (n CurrentHTTPClient) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconvNew.URLScheme("https") + } + return semconvNew.URLScheme("http") +} + func isErrorStatusCode(code int) bool { return code >= 400 || code < 100 } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go index e6e14924f57..315d3dd29cd 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -1,3 +1,6 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconv/util.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -14,14 +17,14 @@ import ( semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) -// splitHostPort splits a network address hostport of the form "host", +// SplitHostPort splits a network address hostport of the form "host", // "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", // "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and // port. // // An empty host is returned if it is not provided or unparsable. A negative // port is returned if it is not provided or unparsable. 
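Per the documentation just above, SplitHostPort signals "not provided or unparsable" with an empty host and a negative port. Since the real helper lives in an internal package, this sketch only approximates the documented contract with the standard library, to make the expected outputs concrete:

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

// splitHostPortSketch approximates the documented behavior of SplitHostPort:
// an empty host and a negative port mean "not provided or unparsable".
func splitHostPortSketch(hostport string) (string, int) {
	h, p, err := net.SplitHostPort(hostport)
	if err != nil {
		// No port present: treat the whole input as the host.
		return hostport, -1
	}
	port, err := strconv.ParseUint(p, 10, 16)
	if err != nil {
		return h, -1
	}
	return h, int(port)
}

func main() {
	for _, in := range []string{"example.com:8080", "example.com", "[::1]:443", ":8080"} {
		h, p := splitHostPortSketch(in)
		fmt.Printf("%-20q -> host=%q port=%d\n", in, h, p)
	}
}
```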
-func splitHostPort(hostport string) (host string, port int) { +func SplitHostPort(hostport string) (host string, port int) { port = -1 if strings.HasPrefix(hostport, "[") { @@ -75,7 +78,16 @@ func serverClientIP(xForwardedFor string) string { func netProtocol(proto string) (name string, version string) { name, version, _ = strings.Cut(proto, "/") - name = strings.ToLower(name) + switch name { + case "HTTP": + name = "http" + case "QUIC": + name = "quic" + case "SPDY": + name = "spdy" + default: + name = strings.ToLower(name) + } return name, version } @@ -96,3 +108,13 @@ func handleErr(err error) { otel.Handle(err) } } + +func standardizeHTTPMethod(method string) string { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return method +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go index 5367732ec5d..742c2113e1b 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -1,3 +1,6 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconv/v1.20.0.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -8,7 +11,6 @@ import ( "io" "net/http" "slices" - "strings" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel/attribute" @@ -17,7 +19,7 @@ import ( semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) -type oldHTTPServer struct{} +type OldHTTPServer struct{} // RequestTraceAttrs returns trace attributes for an HTTP request received by a // server. @@ -35,14 +37,18 @@ type oldHTTPServer struct{} // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. -func (o oldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { +func (o OldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { return semconvutil.HTTPServerRequest(server, req) } +func (o OldHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue { + return semconvutil.NetTransport(network) +} + // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. -func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { +func (o OldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { attributes := []attribute.KeyValue{} if resp.ReadBytes > 0 { @@ -67,7 +73,7 @@ func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke } // Route returns the attribute for the route.
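Two normalization helpers are added above: netProtocol now special-cases the common protocol names (plausibly to skip the strings.ToLower allocation on hot paths), and standardizeHTTPMethod collapses anything outside the nine net/http methods to "_OTHER", as the HTTP semantic conventions require. A self-contained mirror of the method normalization:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// standardizeHTTPMethodSketch mirrors the helper above: any method outside
// the nine methods defined by net/http is reported as "_OTHER".
func standardizeHTTPMethodSketch(method string) string {
	method = strings.ToUpper(method)
	switch method {
	case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead,
		http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut,
		http.MethodTrace:
	default:
		method = "_OTHER"
	}
	return method
}

func main() {
	fmt.Println(standardizeHTTPMethodSketch("get"))      // GET
	fmt.Println(standardizeHTTPMethodSketch("PROPFIND")) // _OTHER
}
```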
-func (o oldHTTPServer) Route(route string) attribute.KeyValue { +func (o OldHTTPServer) Route(route string) attribute.KeyValue { return semconv.HTTPRoute(route) } @@ -84,7 +90,7 @@ const ( serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds ) -func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { +func (h OldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { if meter == nil { return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} } @@ -113,17 +119,17 @@ func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, return requestBytesCounter, responseBytesCounter, serverLatencyMeasure } -func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { +func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { n := len(additionalAttributes) + 3 var host string var p int if server == "" { - host, p = splitHostPort(req.Host) + host, p = SplitHostPort(req.Host) } else { // Prioritize the primary server name. - host, p = splitHostPort(server) + host, p = SplitHostPort(server) if p < 0 { - _, p = splitHostPort(req.Host) + _, p = SplitHostPort(req.Host) } } hostPort := requiredHTTPPort(req.TLS != nil, p) @@ -144,7 +150,7 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status attributes := slices.Grow(additionalAttributes, n) attributes = append(attributes, - standardizeHTTPMethodMetric(req.Method), + semconv.HTTPMethod(standardizeHTTPMethod(req.Method)), o.scheme(req.TLS != nil), semconv.NetHostName(host)) @@ -164,24 +170,24 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status return attributes } -func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive +func (o OldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive if https { return semconv.HTTPSchemeHTTPS } return semconv.HTTPSchemeHTTP } -type oldHTTPClient struct{} +type OldHTTPClient struct{} -func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { +func (o OldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { return semconvutil.HTTPClientRequest(req) } -func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { +func (o OldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { return semconvutil.HTTPClientResponse(resp) } -func (o oldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { +func (o OldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { /* The following semantic conventions are returned if present: http.method string http.status_code int @@ -197,7 +203,7 @@ func (o oldHTTPClient) MetricAttributes(req *http.Request, statusCode int, addit var requestHost string var requestPort int for _, hostport := range []string{h, req.Header.Get("Host")} { - requestHost, requestPort = splitHostPort(hostport) + requestHost, requestPort = SplitHostPort(hostport) if requestHost != "" || requestPort > 0 { break } @@ -214,7 +220,7 @@ func (o oldHTTPClient) MetricAttributes(req *http.Request, statusCode int, addit 
attributes := slices.Grow(additionalAttributes, n) attributes = append(attributes, - standardizeHTTPMethodMetric(req.Method), + semconv.HTTPMethod(standardizeHTTPMethod(req.Method)), semconv.NetPeerName(requestHost), ) @@ -235,7 +241,7 @@ const ( clientDuration = "http.client.duration" // Incoming end to end duration, milliseconds ) -func (o oldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { +func (o OldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { if meter == nil { return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} } @@ -263,12 +269,9 @@ func (o oldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, return requestBytesCounter, responseBytesCounter, latencyMeasure } -func standardizeHTTPMethodMetric(method string) attribute.KeyValue { - method = strings.ToUpper(method) - switch method { - case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: - default: - method = "_OTHER" +// Attributes for httptrace. +func (c OldHTTPClient) TraceAttributes(host string) []attribute.KeyValue { + return []attribute.KeyValue{ + semconv.NetHostName(host), } - return semconv.HTTPMethod(method) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go index b80a1db61fa..de74fa252a9 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -200,6 +200,15 @@ func splitHostPort(hostport string) (host string, port int) { func netProtocol(proto string) (name string, version string) { name, version, _ = strings.Cut(proto, "/") - name = strings.ToLower(name) + switch name { + case "HTTP": + name = "http" + case "QUIC": + name = "quic" + case "SPDY": + name = "spdy" + default: + name = strings.ToLower(name) + } return name, version } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index 39681ad4b09..44b86ad8609 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -153,7 +153,7 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { - t.semconv.RecordResponseSize(ctx, n, metricOpts.AddOptions()) + t.semconv.RecordResponseSize(ctx, n, metricOpts) } // traces diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 16ef3cb9b94..1ec9a00c7a4 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. 
func Version() string { - return "0.57.0" + return "0.60.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/zpages/internal/gen.go b/vendor/go.opentelemetry.io/contrib/zpages/internal/gen.go index b00306c91a2..33a0a767468 100644 --- a/vendor/go.opentelemetry.io/contrib/zpages/internal/gen.go +++ b/vendor/go.opentelemetry.io/contrib/zpages/internal/gen.go @@ -5,5 +5,8 @@ package internal // import "go.opentelemetry.io/contrib/zpages/internal" import "embed" +// Templates embeds all the HTML templates used to serve the tracez +// endpoint +// //go:embed templates/* var Templates embed.FS diff --git a/vendor/go.opentelemetry.io/contrib/zpages/version.go b/vendor/go.opentelemetry.io/contrib/zpages/version.go index 9444a213628..a70c3a72a48 100644 --- a/vendor/go.opentelemetry.io/contrib/zpages/version.go +++ b/vendor/go.opentelemetry.io/contrib/zpages/version.go @@ -5,7 +5,7 @@ package zpages // import "go.opentelemetry.io/contrib/zpages" // Version is the current release version of the zpages span processor. func Version() string { - return "0.57.0" + return "0.60.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore index ae8577ef366..749e8e881bb 100644 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -1,6 +1,7 @@ .DS_Store Thumbs.db +.cache/ .tools/ venv/ .idea/ diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index dbfb2a165a0..c58e48ab0c4 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -22,14 +22,16 @@ linters: - govet - ineffassign - misspell + - perfsprint - revive - staticcheck - - tenv - testifylint - typecheck - unconvert - unused - unparam + - usestdlibvars + - usetesting issues: # Maximum issues count per one linter. @@ -61,10 +63,11 @@ issues: text: "calls to (.+) only in main[(][)] or init[(][)] functions" linters: - revive - # It's okay to not run gosec in a test. + # It's okay to not run gosec and perfsprint in a test. - path: _test\.go linters: - gosec + - perfsprint # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) # as we commonly use it in tests and examples. - text: "G404:" @@ -95,6 +98,13 @@ linters-settings: - pkg: "crypto/md5" - pkg: "crypto/sha1" - pkg: "crypto/**/pkix" + auto/sdk: + files: + - "!internal/global/trace.go" + - "~internal/global/trace_test.go" + deny: + - pkg: "go.opentelemetry.io/auto/sdk" + desc: Do not use SDK from automatic instrumentation. otlp-internal: files: - "!**/exporters/otlp/internal/**/*.go" @@ -154,137 +164,71 @@ linters-settings: locale: US ignore-words: - cancelled + perfsprint: + err-error: true + errorf: true + int-conversion: true + sprintf1: true + strconcat: true revive: # Sets the default failure confidence. # This means that linting errors with less than 0.8 confidence will be ignored.
# Default: 0.8 confidence: 0.01 + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md rules: - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports - name: blank-imports - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr - name: bool-literal-in-expr - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr - name: constant-logical-expr - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument - # TODO (#3372) re-enable linter when it is compatible. https://github.com/golangci/golangci-lint/issues/3280 - name: context-as-argument disabled: true arguments: - allowTypesBefore: "*testing.T" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type + - allowTypesBefore: "*testing.T" - name: context-keys-type - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit - name: deep-exit - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer - name: defer - disabled: false arguments: - ["call-chain", "loop"] - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports - name: dot-imports - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports - name: duplicated-imports - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return - name: early-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block + arguments: + - "preserveScope" - name: empty-block - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines - name: empty-lines - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming - name: error-naming - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return - name: error-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings - name: error-strings - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf - name: errorf - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported - name: exported - disabled: false arguments: - "sayRepetitiveInsteadOfStutters" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter - name: flag-parameter - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches - name: identical-branches - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return - name: if-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement + - name: import-shadowing - name: increment-decrement - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow - name: indent-error-flow - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing - - name: import-shadowing - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments + arguments: + - "preserveScope" - name: package-comments - disabled: false - # 
https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range - name: range - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure - name: range-val-in-closure - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address - name: range-val-address - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id - name: redefines-builtin-id - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format - name: string-format - disabled: false arguments: - - panic - '/^[^\n]*$/' - must not contain line breaks - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag - name: struct-tag - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else - name: superfluous-else - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal - - name: time-equal - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming - - name: var-naming - disabled: false arguments: - - ["ID"] # AllowList - - ["Otel", "Aws", "Gcp"] # DenyList - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration - - name: var-declaration - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion + - "preserveScope" + - name: time-equal - name: unconditional-recursion - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return - name: unexported-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error - name: unhandled-error - disabled: false arguments: - "fmt.Fprint" - "fmt.Fprintf" @@ -292,15 +236,14 @@ linters-settings: - "fmt.Print" - "fmt.Printf" - "fmt.Println" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt - name: unnecessary-stmt - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break - name: useless-break - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value + - name: var-declaration + - name: var-naming + arguments: + - ["ID"] # AllowList + - ["Otel", "Aws", "Gcp"] # DenyList - name: waitgroup-by-value - disabled: false testifylint: enable-all: true disable: diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 8f68dbd04ae..c076db2823f 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,87 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.35.0/0.57.0/0.11.0] 2025-03-05 + +This release is the last to support [Go 1.22]. +The next release will require at least [Go 1.23]. + +### Added + +- Add `ValueFromAttribute` and `KeyValueFromAttribute` in `go.opentelemetry.io/otel/log`. (#6180) +- Add `EventName` and `SetEventName` to `Record` in `go.opentelemetry.io/otel/log`. (#6187) +- Add `EventName` to `RecordFactory` in `go.opentelemetry.io/otel/log/logtest`. (#6187) +- `AssertRecordEqual` in `go.opentelemetry.io/otel/log/logtest` checks `Record.EventName`. (#6187) +- Add `EventName` and `SetEventName` to `Record` in `go.opentelemetry.io/otel/sdk/log`. 
(#6193) +- Add `EventName` to `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest`. (#6193) +- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#6211) +- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6211) +- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` (#6210) +- The `go.opentelemetry.io/otel/semconv/v1.28.0` package. + The package contains semantic conventions from the `v1.28.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.28.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.27.0`(#6236) +- The `go.opentelemetry.io/otel/semconv/v1.30.0` package. + The package contains semantic conventions from the `v1.30.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.30.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.28.0`(#6240) +- Document the pitfalls of using `Resource` as a comparable type. + `Resource.Equal` and `Resource.Equivalent` should be used instead. (#6272) +- Support [Go 1.24]. (#6304) +- Add `FilterProcessor` and `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. + It replaces `go.opentelemetry.io/otel/sdk/log/internal/x.FilterProcessor`. + Compared to previous version it additionally gives the possibility to filter by resource and instrumentation scope. (#6317) + +### Changed + +- Update `github.com/prometheus/common` to `v0.62.0`, which changes the `NameValidationScheme` to `NoEscaping`. + This allows metrics names to keep original delimiters (e.g. `.`), rather than replacing with underscores. + This is controlled by the `Content-Type` header, or can be reverted by setting `NameValidationScheme` to `LegacyValidation` in `github.com/prometheus/common/model`. (#6198) + +### Fixes + +- Eliminate goroutine leak for the processor returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace` when `Shutdown` is called and the passed `ctx` is canceled and `SpanExporter.Shutdown` has not returned. (#6368) +- Eliminate goroutine leak for the processor returned by `NewBatchSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace` when `ForceFlush` is called and the passed `ctx` is canceled and `SpanExporter.Export` has not returned. (#6369) + +## [1.34.0/0.56.0/0.10.0] 2025-01-17 + +### Changed + +- Remove the notices from `Logger` to make the whole Logs API user-facing in `go.opentelemetry.io/otel/log`. (#6167) + +### Fixed + +- Relax minimum Go version to 1.22.0 in various modules. (#6073) +- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` client is corrected from `otlphttpgrpc` to `otlptracegrpc`. (#6143) +- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlphttpgrpc` client is corrected from `otlphttphttp` to `otlptracehttp`. (#6143) + +## [1.33.0/0.55.0/0.9.0/0.0.12] 2024-12-12 + +### Added + +- Add `Reset` method to `SpanRecorder` in `go.opentelemetry.io/otel/sdk/trace/tracetest`. (#5994) +- Add `EnabledInstrument` interface in `go.opentelemetry.io/otel/sdk/metric/internal/x`. + This is an experimental interface that is implemented by synchronous instruments provided by `go.opentelemetry.io/otel/sdk/metric`. + Users can use it to avoid performing computationally expensive operations when recording measurements. 
+ It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#6016) + +### Changed + +- The default global API now supports full auto-instrumentation from the `go.opentelemetry.io/auto` package. + See that package for more information. (#5920) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5929) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5929) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5929) +- Performance improvements for attribute value `AsStringSlice`, `AsFloat64Slice`, `AsInt64Slice`, `AsBoolSlice`. (#6011) +- Change `EnabledParameters` to have a `Severity` field instead of a getter and setter in `go.opentelemetry.io/otel/log`. (#6009) + +### Fixed + +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5954) +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5954) +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5954) +- Fix invalid exemplar keys in `go.opentelemetry.io/otel/exporters/prometheus`. (#5995) +- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/trace`. (#5997) +- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/log`. (#6032) + ## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08 ### Added @@ -3156,7 +3237,10 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.32.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.35.0...HEAD +[1.35.0/0.57.0/0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.35.0 +[1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0 +[1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0 [1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0 [1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0 [1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0 @@ -3245,6 +3329,7 @@ It contains api and sdk for trace and meter. +[Go 1.24]: https://go.dev/doc/go1.24 [Go 1.23]: https://go.dev/doc/go1.23 [Go 1.22]: https://go.dev/doc/go1.22 [Go 1.21]: https://go.dev/doc/go1.21 diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 22a2e9dbd49..7b8af585aab 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -181,6 +181,18 @@ patterns in the spec. For a deeper discussion, see [this](https://github.com/open-telemetry/opentelemetry-specification/issues/165). +## Tests + +Each functionality should be covered by tests. + +Performance-critical functionality should also be covered by benchmarks. + +- Pull requests adding a performance-critical functionality +should have `go test -bench` output in their description. 
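The contributing guidance above asks for `go test -bench` and benchstat output on performance-sensitive pull requests. For reference, a benchmark of the minimal shape that produces such output once placed in a `_test.go` file (the function under test is a stand-in):

```go
package semconv

import (
	"strings"
	"testing"
)

// toLowerName is a stand-in for whatever hot-path helper a PR touches.
func toLowerName(name string) string { return strings.ToLower(name) }

// Run with: go test -bench=. -benchmem
func BenchmarkToLowerName(b *testing.B) {
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		_ = toLowerName("HTTP")
	}
}
```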
+- Pull requests changing a performance-critical functionality +should have [`benchstat`](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat) +output in their description. + ## Documentation Each (non-internal, non-test) package must be documented using diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index b8292a4fb91..226410d7428 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -11,11 +11,15 @@ ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} GO = go TIMEOUT = 60 +# User to run as in docker images. +DOCKER_USER=$(shell id -u):$(shell id -g) +DEPENDENCIES_DOCKERFILE=./dependencies.Dockerfile + .DEFAULT_GOAL := precommit .PHONY: precommit ci -precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default -ci: generate license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage +precommit: generate toolchain-check license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default +ci: generate toolchain-check license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage # Tools @@ -81,20 +85,20 @@ PIP := $(PYTOOLS)/pip WORKDIR := /workdir # The python image to use for the virtual environment. -PYTHONIMAGE := python:3.11.3-slim-bullseye +PYTHONIMAGE := $(shell awk '$$4=="python" {print $$2}' $(DEPENDENCIES_DOCKERFILE)) # Run the python image with the current directory mounted. -DOCKERPY := docker run --rm -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE) +DOCKERPY := docker run --rm -u $(DOCKER_USER) -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE) # Create a virtual environment for Python tools. $(PYTOOLS): # The `--upgrade` flag is needed to ensure that the virtual environment is # created with the latest pip version. - @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip" + @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade --cache-dir=$(WORKDIR)/.cache/pip pip" # Install python packages into the virtual environment. $(PYTOOLS)/%: $(PYTOOLS) - @$(DOCKERPY) $(PIP) install -r requirements.txt + @$(DOCKERPY) $(PIP) install --cache-dir=$(WORKDIR)/.cache/pip -r requirements.txt CODESPELL = $(PYTOOLS)/codespell $(CODESPELL): PACKAGE=codespell @@ -119,7 +123,7 @@ vanity-import-fix: $(PORTO) # Generate go.work file for local development. .PHONY: go-work go-work: $(CROSSLINK) - $(CROSSLINK) work --root=$(shell pwd) + $(CROSSLINK) work --root=$(shell pwd) --go=1.22.7 # Build @@ -235,6 +239,16 @@ govulncheck/%: $(GOVULNCHECK) codespell: $(CODESPELL) @$(DOCKERPY) $(CODESPELL) +.PHONY: toolchain-check +toolchain-check: + @toolchainRes=$$(for f in $(ALL_GO_MOD_DIRS); do \ + awk '/^toolchain/ { found=1; next } END { if (found) print FILENAME }' $$f/go.mod; \ + done); \ + if [ -n "$${toolchainRes}" ]; then \ + echo "toolchain checking failed:"; echo "$${toolchainRes}"; \ + exit 1; \ + fi + .PHONY: license-check license-check: @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \ @@ -255,13 +269,30 @@ check-clean-work-tree: exit 1; \ fi +# The weaver docker image to use for semconv-generate. 
+WEAVER_IMAGE := $(shell awk '$$4=="weaver" {print $$2}' $(DEPENDENCIES_DOCKERFILE)) + SEMCONVPKG ?= "semconv/" .PHONY: semconv-generate -semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) +semconv-generate: $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) - [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -z "$(SEMCONVPKG)/capitalizations.txt" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" + # Ensure the target directory for source code is available. + mkdir -p $(PWD)/$(SEMCONVPKG)/${TAG} + # Note: We mount a home directory for downloading/storing the semconv repository. + # Weaver will automatically clean the cache when finished, but the directories will remain. + mkdir -p ~/.weaver + docker run --rm \ + -u $(DOCKER_USER) \ + --env HOME=/tmp/weaver \ + --mount 'type=bind,source=$(PWD)/semconv,target=/home/weaver/templates/registry/go,readonly' \ + --mount 'type=bind,source=$(PWD)/semconv/${TAG},target=/home/weaver/target' \ + --mount 'type=bind,source=$(HOME)/.weaver,target=/tmp/weaver/.weaver' \ + $(WEAVER_IMAGE) registry generate \ + --registry=https://github.com/open-telemetry/semantic-conventions/archive/refs/tags/$(TAG).zip[model] \ + --templates=/home/weaver/templates \ + --param tag=$(TAG) \ + go \ + /home/weaver/target $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" .PHONY: gorelease diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index efec278905b..8421cd7e597 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -1,9 +1,11 @@ # OpenTelemetry-Go -[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain) +[![ci](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml) [![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) [![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) [![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) +[![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/open-telemetry/opentelemetry-go/badge)](https://scorecard.dev/viewer/?uri=github.com/open-telemetry/opentelemetry-go) +[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/9996/badge)](https://www.bestpractices.dev/projects/9996) [![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/). @@ -49,18 +51,25 @@ Currently, this project supports the following environments. 
| OS | Go Version | Architecture | |----------|------------|--------------| +| Ubuntu | 1.24 | amd64 | | Ubuntu | 1.23 | amd64 | | Ubuntu | 1.22 | amd64 | +| Ubuntu | 1.24 | 386 | | Ubuntu | 1.23 | 386 | | Ubuntu | 1.22 | 386 | -| Linux | 1.23 | arm64 | -| Linux | 1.22 | arm64 | +| Ubuntu | 1.24 | arm64 | +| Ubuntu | 1.23 | arm64 | +| Ubuntu | 1.22 | arm64 | +| macOS 13 | 1.24 | amd64 | | macOS 13 | 1.23 | amd64 | | macOS 13 | 1.22 | amd64 | +| macOS | 1.24 | arm64 | | macOS | 1.23 | arm64 | | macOS | 1.22 | arm64 | +| Windows | 1.24 | amd64 | | Windows | 1.23 | amd64 | | Windows | 1.22 | amd64 | +| Windows | 1.24 | 386 | | Windows | 1.23 | 386 | | Windows | 1.22 | 386 | diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index ffa9b61258a..1e13ae54f71 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -5,17 +5,14 @@ New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated. The `semconv-generate` make target is used for this. -1. Checkout a local copy of the [OpenTelemetry Semantic Conventions] to the desired release tag. -2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest` -3. Run the `make semconv-generate ...` target from this repository. +1. Set the `TAG` environment variable to the semantic convention tag you want to generate. +2. Run the `make semconv-generate ...` target from this repository. For example, ```sh -export TAG="v1.21.0" # Change to the release version you are generating. -export OTEL_SEMCONV_REPO="/absolute/path/to/opentelemetry/semantic-conventions" -docker pull otel/semconvgen:latest -make semconv-generate # Uses the exported TAG and OTEL_SEMCONV_REPO. +export TAG="v1.30.0" # Change to the release version you are generating. +make semconv-generate # Uses the exported TAG. ``` This should create a new sub-package of [`semconv`](./semconv). @@ -130,6 +127,6 @@ Importantly, bump any package versions referenced to be the latest one you just Bump the dependencies in the following Go services: -- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice) -- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice) -- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice) +- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting) +- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout) +- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog) diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md index 412f1e362bb..b8cb605c166 100644 --- a/vendor/go.opentelemetry.io/otel/VERSIONING.md +++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md @@ -26,7 +26,7 @@ is designed so the following goals can be achieved. go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the paths used in `go get` commands (e.g., `go get - go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a + go.opentelemetry.io/otel/v2@v2.0.1`). Note there is both a `/v2` and a `@v2.0.1` in that example. 
One way to think about it is that the module name now includes the `/v2`, so include `/v2` whenever you are using the module name). diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 36f5367030c..0e1fe242203 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -355,7 +355,7 @@ func parseMember(member string) (Member, error) { } // replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'. -func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { +func replaceInvalidUTF8Sequences(c int, unescapeVal string) string { if utf8.ValidString(unescapeVal) { return unescapeVal } @@ -363,7 +363,7 @@ func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69 var b strings.Builder - b.Grow(cap) + b.Grow(c) for i := 0; i < len(unescapeVal); { r, size := utf8.DecodeRuneInString(unescapeVal[i:]) if r == utf8.RuneError && size == 1 { diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index 2acbac35466..49a35b12255 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -5,6 +5,7 @@ package codes // import "go.opentelemetry.io/otel/codes" import ( "encoding/json" + "errors" "fmt" "strconv" ) @@ -63,7 +64,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return nil } if c == nil { - return fmt.Errorf("nil receiver passed to UnmarshalJSON") + return errors.New("nil receiver passed to UnmarshalJSON") } var x interface{} diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile new file mode 100644 index 00000000000..e4c4a753c88 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile @@ -0,0 +1,3 @@ +# This is a renovate-friendly source of Docker images. 
+FROM python:3.13.2-slim-bullseye@sha256:31b581c8218e1f3c58672481b3b7dba8e898852866b408c6a984c22832523935 AS python +FROM otel/weaver:v0.13.2@sha256:ae7346b992e477f629ea327e0979e8a416a97f7956ab1f7e95ac1f44edf1a893 AS weaver diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index 2171bee3c84..8409b5f8f95 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -294,7 +294,7 @@ func (c *client) MarshalLog() interface{} { Type string Endpoint string }{ - Type: "otlphttpgrpc", + Type: "otlptracegrpc", Endpoint: c.endpoint, } } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go index 3ee452ef72b..0a317d92637 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go @@ -98,7 +98,7 @@ func cleanPath(urlPath string, defaultPath string) string { return defaultPath } if !path.IsAbs(tmp) { - tmp = fmt.Sprintf("/%s", tmp) + tmp = "/" + tmp } return tmp } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index c76bedfb167..f5cad46b74d 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.32.0" + return "1.35.0" } diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go index 822d8479474..691d96c7554 100644 --- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -49,12 +49,11 @@ func AsBoolSlice(v interface{}) []bool { if rv.Type().Kind() != reflect.Array { return nil } - var zero bool - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]bool) + cpy := make([]bool, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsInt64Slice converts an int64 array into a slice into with same elements as array. @@ -63,12 +62,11 @@ func AsInt64Slice(v interface{}) []int64 { if rv.Type().Kind() != reflect.Array { return nil } - var zero int64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]int64) + cpy := make([]int64, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsFloat64Slice converts a float64 array into a slice into with same elements as array. 
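The rewrite above drops the reflect-built intermediate array in favor of a plain make plus reflect.Copy, which is the change behind the AsStringSlice/AsFloat64Slice/AsInt64Slice/AsBoolSlice performance note (#6011) in the CHANGELOG earlier. A standalone sketch of the technique for the int64 case:

```go
package main

import (
	"fmt"
	"reflect"
)

// asInt64Slice mirrors the simplified conversion above: copy a [N]int64
// array (passed as interface{}) into a freshly made slice.
func asInt64Slice(v interface{}) []int64 {
	rv := reflect.ValueOf(v)
	if rv.Type().Kind() != reflect.Array {
		return nil
	}
	cpy := make([]int64, rv.Len())
	if len(cpy) > 0 {
		// A slice value is valid reflect.Copy destination storage, so the
		// array elements are written straight into cpy.
		_ = reflect.Copy(reflect.ValueOf(cpy), rv)
	}
	return cpy
}

func main() {
	arr := [3]int64{1, 2, 3}
	fmt.Println(asInt64Slice(arr)) // [1 2 3]
}
```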
@@ -77,12 +75,11 @@ func AsFloat64Slice(v interface{}) []float64 { if rv.Type().Kind() != reflect.Array { return nil } - var zero float64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]float64) + cpy := make([]float64, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsStringSlice converts a string array into a slice into with same elements as array. @@ -91,10 +88,9 @@ func AsStringSlice(v interface{}) []string { if rv.Type().Kind() != reflect.Array { return nil } - var zero string - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]string) + cpy := make([]string, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index ac65262c656..8982aa0dc56 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -25,6 +25,7 @@ import ( "sync" "sync/atomic" + "go.opentelemetry.io/auto/sdk" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" @@ -145,6 +146,30 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart return delegate.(trace.Tracer).Start(ctx, name, opts...) } + return t.newSpan(ctx, autoInstEnabled, name, opts) +} + +// autoInstEnabled determines if the auto-instrumentation SDK span is returned +// from the tracer when not backed by a delegate and auto-instrumentation has +// attached to this process. +// +// The auto-instrumentation is expected to overwrite this value to true when it +// attaches. By default, this will point to false and mean a tracer will return +// a nonRecordingSpan by default. +var autoInstEnabled = new(bool) + +func (t *tracer) newSpan(ctx context.Context, autoSpan *bool, name string, opts []trace.SpanStartOption) (context.Context, trace.Span) { + // autoInstEnabled is passed to newSpan via the autoSpan parameter. This is + // so the auto-instrumentation can define a uprobe for (*t).newSpan and be + // provided with the address of the bool autoInstEnabled points to. It + // needs to be a parameter so that pointer can be reliably determined, it + // should not be read from the global. + + if *autoSpan { + tracer := sdk.TracerProvider().Tracer(t.name, t.opts...) + return tracer.Start(ctx, name, opts...) 
+ } + s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} ctx = trace.ContextWithSpan(ctx, s) return ctx, s diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json index 0a29a2f13d8..a6fa353f95c 100644 --- a/vendor/go.opentelemetry.io/otel/renovate.json +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -1,7 +1,7 @@ { "$schema": "https://docs.renovatebot.com/renovate-schema.json", "extends": [ - "config:recommended" + "config:best-practices" ], "ignorePaths": [], "labels": ["Skip Changelog", "dependencies"], @@ -15,10 +15,8 @@ "enabled": true }, { - "matchFileNames": ["internal/tools/**"], - "matchManagers": ["gomod"], - "matchDepTypes": ["indirect"], - "enabled": false + "matchPackageNames": ["go.opentelemetry.io/build-tools/**"], + "groupName": "build-tools" }, { "matchPackageNames": ["google.golang.org/genproto/googleapis/**"], diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt index ab09daf9d53..1bb55fb1cc5 100644 --- a/vendor/go.opentelemetry.io/otel/requirements.txt +++ b/vendor/go.opentelemetry.io/otel/requirements.txt @@ -1 +1 @@ -codespell==2.3.0 +codespell==2.4.1 diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go index ce455dc544b..3d703c5d98e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go @@ -5,6 +5,7 @@ package resource // import "go.opentelemetry.io/otel/sdk/resource" import ( "encoding/xml" + "errors" "fmt" "io" "os" @@ -63,7 +64,7 @@ func parsePlistFile(file io.Reader) (map[string]string, error) { } if len(v.Dict.Key) != len(v.Dict.String) { - return nil, fmt.Errorf("the number of and elements doesn't match") + return nil, errors.New("the number of and elements doesn't match") } properties := make(map[string]string, len(v.Dict.Key)) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go index ad4b50df404..09b91e1e1b0 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -21,11 +21,22 @@ import ( // Resources should be passed and stored as pointers // (`*resource.Resource`). The `nil` value is equivalent to an empty // Resource. +// +// Note that the Go == operator compares not just the resource attributes but +// also all other internals of the Resource type. Therefore, Resource values +// should not be used as map or database keys. In general, the [Resource.Equal] +// method should be used instead of direct comparison with ==, since that +// method ensures the correct comparison of resource attributes, and the +// [attribute.Distinct] returned from [Resource.Equivalent] should be used for +// map and database keys instead. type Resource struct { attrs attribute.Set schemaURL string } +// Compile-time check that the Resource remains comparable. +var _ map[Resource]struct{} = nil + var ( defaultResource *Resource defaultResourceOnce sync.Once @@ -137,15 +148,19 @@ func (r *Resource) Iter() attribute.Iterator { return r.attrs.Iter() } -// Equal returns true when a Resource is equivalent to this Resource. -func (r *Resource) Equal(eq *Resource) bool { +// Equal returns whether r and o represent the same resource. Two resources can +// be equal even if they have different schema URLs. 
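The new doc comment above warns that == compares all Resource internals, not just the attributes. A short sketch of the sanctioned alternatives, Equal for comparison and the attribute.Distinct returned by Equivalent for map keys, using only the public resource and attribute packages:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	r1 := resource.NewSchemaless(attribute.String("service.name", "demo"))
	r2 := resource.NewSchemaless(attribute.String("service.name", "demo"))

	// Compare with Equal, never with ==.
	fmt.Println(r1.Equal(r2)) // true

	// Key maps by the attribute.Distinct from Equivalent, not by Resource.
	seen := map[attribute.Distinct]bool{r1.Equivalent(): true}
	fmt.Println(seen[r2.Equivalent()]) // true
}
```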
+// +// See the documentation on the [Resource] type for the pitfalls of using == +// with Resource values; most code should use Equal instead. +func (r *Resource) Equal(o *Resource) bool { if r == nil { r = Empty() } - if eq == nil { - eq = Empty() + if o == nil { + o = Empty() } - return r.Equivalent() == eq.Equivalent() + return r.Equivalent() == o.Equivalent() } // Merge creates a new [Resource] by merging a and b. diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index ccc97e1b662..6872cbb4e7a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -201,10 +201,9 @@ func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error { } } - wait := make(chan error) + wait := make(chan error, 1) go func() { wait <- bsp.exportSpans(ctx) - close(wait) }() // Wait until the export is finished or the context is cancelled/timed out select { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go index d2d1f72466b..9b672a1d70d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go @@ -5,7 +5,6 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "errors" - "fmt" "os" "strconv" "strings" @@ -26,7 +25,7 @@ const ( type errUnsupportedSampler string func (e errUnsupportedSampler) Error() string { - return fmt.Sprintf("unsupported sampler: %s", string(e)) + return "unsupported sampler: " + string(e) } var ( @@ -39,7 +38,7 @@ type samplerArgParseError struct { } func (e samplerArgParseError) Error() string { - return fmt.Sprintf("parsing sampler argument: %s", e.parseErr.Error()) + return "parsing sampler argument: " + e.parseErr.Error() } func (e samplerArgParseError) Unwrap() error { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go index ebb6df6c908..aa7b262d0d9 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go @@ -47,12 +47,12 @@ const ( // Drop will not record the span and all attributes/events will be dropped. Drop SamplingDecision = iota - // Record indicates the span's `IsRecording() == true`, but `Sampled` flag - // *must not* be set. + // RecordOnly indicates the span's IsRecording method returns true, but trace.FlagsSampled flag + // must not be set. RecordOnly - // RecordAndSample has span's `IsRecording() == true` and `Sampled` flag - // *must* be set. + // RecordAndSample indicates the span's IsRecording method returns true and trace.FlagsSampled flag + // must be set. 
RecordAndSample ) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go index 554111bb4a2..664e13e03f0 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go @@ -58,7 +58,7 @@ func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error { var err error ssp.stopOnce.Do(func() { stopFunc := func(exp SpanExporter) (<-chan error, func()) { - done := make(chan error) + done := make(chan error, 1) return done, func() { done <- exp.Shutdown(ctx) } } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index 17f883c2c86..8f4fc385082 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -347,54 +347,99 @@ func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue { } switch attr.Value.Type() { case attribute.STRING: - if v := attr.Value.AsString(); len(v) > limit { - return attr.Key.String(safeTruncate(v, limit)) - } + v := attr.Value.AsString() + return attr.Key.String(truncate(limit, v)) case attribute.STRINGSLICE: v := attr.Value.AsStringSlice() for i := range v { - if len(v[i]) > limit { - v[i] = safeTruncate(v[i], limit) - } + v[i] = truncate(limit, v[i]) } return attr.Key.StringSlice(v) } return attr } -// safeTruncate truncates the string and guarantees valid UTF-8 is returned. -func safeTruncate(input string, limit int) string { - if trunc, ok := safeTruncateValidUTF8(input, limit); ok { - return trunc +// truncate returns a truncated version of s such that it contains less than +// the limit number of characters. Truncation is applied by returning the limit +// number of valid characters contained in s. +// +// If limit is negative, it returns the original string. +// +// UTF-8 is supported. When truncating, all invalid characters are dropped +// before applying truncation. +// +// If s already contains less than the limit number of bytes, it is returned +// unchanged. No invalid characters are removed. +func truncate(limit int, s string) string { + // This prioritize performance in the following order based on the most + // common expected use-cases. + // + // - Short values less than the default limit (128). + // - Strings with valid encodings that exceed the limit. + // - No limit. + // - Strings with invalid encodings that exceed the limit. + if limit < 0 || len(s) <= limit { + return s + } + + // Optimistically, assume all valid UTF-8. + var b strings.Builder + count := 0 + for i, c := range s { + if c != utf8.RuneError { + count++ + if count > limit { + return s[:i] + } + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // Invalid encoding. + b.Grow(len(s) - 1) + _, _ = b.WriteString(s[:i]) + s = s[i:] + break + } + } + + // Fast-path, no invalid input. + if b.Cap() == 0 { + return s } - trunc, _ := safeTruncateValidUTF8(strings.ToValidUTF8(input, ""), limit) - return trunc -} -// safeTruncateValidUTF8 returns a copy of the input string safely truncated to -// limit. The truncation is ensured to occur at the bounds of complete UTF-8 -// characters. If invalid encoding of UTF-8 is encountered, input is returned -// with false, otherwise, the truncated input will be returned with true. 
-func safeTruncateValidUTF8(input string, limit int) (string, bool) {
-	for cnt := 0; cnt <= limit; {
-		r, size := utf8.DecodeRuneInString(input[cnt:])
-		if r == utf8.RuneError {
-			return input, false
+	// Truncate while validating UTF-8.
+	for i := 0; i < len(s) && count < limit; {
+		c := s[i]
+		if c < utf8.RuneSelf {
+			// Optimization for single byte runes (common case).
+			_ = b.WriteByte(c)
+			i++
+			count++
+			continue
 		}
-		if cnt+size > limit {
-			return input[:cnt], true
+
+		_, size := utf8.DecodeRuneInString(s[i:])
+		if size == 1 {
+			// We checked for all 1-byte runes above, this is a RuneError.
+			i++
+			continue
 		}
-		cnt += size
+
+		_, _ = b.WriteString(s[i : i+size])
+		i += size
+		count++
 	}
-	return input, true
+
+	return b.String()
 }
 
 // End ends the span. This method does nothing if the span is already ended or
 // is not being recorded.
 //
-// The only SpanOption currently supported is WithTimestamp which will set the
-// end time for a Span's life-cycle.
+// The only SpanEndOptions currently supported are [trace.WithTimestamp] and
+// [trace.WithStackTrace].
 //
 // If this method is called while panicking an error event is added to the
 // Span before ending it and the panic is continued.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go
index 0b214d3fe9f..2b797fbdea2 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/version.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/version.go
@@ -5,5 +5,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk"
 
 // Version is the current release version of the OpenTelemetry SDK in use.
 func Version() string {
-	return "1.32.0"
+	return "1.35.0"
 }
diff --git a/vendor/go.opentelemetry.io/otel/trace/auto.go b/vendor/go.opentelemetry.io/otel/trace/auto.go
new file mode 100644
index 00000000000..7e2910025a9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/auto.go
@@ -0,0 +1,661 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"math"
+	"os"
+	"reflect"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+	"unicode/utf8"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+	"go.opentelemetry.io/otel/trace/embedded"
+	"go.opentelemetry.io/otel/trace/internal/telemetry"
+)
+
+// newAutoTracerProvider returns an auto-instrumentable [trace.TracerProvider].
+// If a [go.opentelemetry.io/auto.Instrumentation] is configured to instrument
+// the process using the returned TracerProvider, all of the telemetry it
+// produces will be processed and handled by that Instrumentation. By default,
+// if no Instrumentation instruments the TracerProvider it will not generate
+// any trace telemetry.
+func newAutoTracerProvider() TracerProvider { return tracerProviderInstance }
+
+var tracerProviderInstance = new(autoTracerProvider)
+
+type autoTracerProvider struct{ embedded.TracerProvider }
+
+var _ TracerProvider = autoTracerProvider{}
+
+func (p autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer {
+	cfg := NewTracerConfig(opts...)
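+	// Hedged usage sketch (caller-side, assumed names): tracers are acquired
+	// as usual and produce no telemetry until an Instrumentation attaches:
+	//
+	//	tr := newAutoTracerProvider().Tracer("example.com/pkg")
+	//	_, span := tr.Start(context.Background(), "op")
+	//	defer span.End()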
+ return autoTracer{ + name: name, + version: cfg.InstrumentationVersion(), + schemaURL: cfg.SchemaURL(), + } +} + +type autoTracer struct { + embedded.Tracer + + name, schemaURL, version string +} + +var _ Tracer = autoTracer{} + +func (t autoTracer) Start(ctx context.Context, name string, opts ...SpanStartOption) (context.Context, Span) { + var psc SpanContext + sampled := true + span := new(autoSpan) + + // Ask eBPF for sampling decision and span context info. + t.start(ctx, span, &psc, &sampled, &span.spanContext) + + span.sampled.Store(sampled) + + ctx = ContextWithSpan(ctx, span) + + if sampled { + // Only build traces if sampled. + cfg := NewSpanStartConfig(opts...) + span.traces, span.span = t.traces(name, cfg, span.spanContext, psc) + } + + return ctx, span +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (t *autoTracer) start( + ctx context.Context, + spanPtr *autoSpan, + psc *SpanContext, + sampled *bool, + sc *SpanContext, +) { + start(ctx, spanPtr, psc, sampled, sc) +} + +// start is used for testing. +var start = func(context.Context, *autoSpan, *SpanContext, *bool, *SpanContext) {} + +func (t autoTracer) traces(name string, cfg SpanConfig, sc, psc SpanContext) (*telemetry.Traces, *telemetry.Span) { + span := &telemetry.Span{ + TraceID: telemetry.TraceID(sc.TraceID()), + SpanID: telemetry.SpanID(sc.SpanID()), + Flags: uint32(sc.TraceFlags()), + TraceState: sc.TraceState().String(), + ParentSpanID: telemetry.SpanID(psc.SpanID()), + Name: name, + Kind: spanKind(cfg.SpanKind()), + } + + span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes()) + + links := cfg.Links() + if limit := maxSpan.Links; limit == 0 { + n := int64(len(links)) + if n > 0 { + span.DroppedLinks = uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked. + } + } else { + if limit > 0 { + n := int64(max(len(links)-limit, 0)) + span.DroppedLinks = uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked. + links = links[n:] + } + span.Links = convLinks(links) + } + + if t := cfg.Timestamp(); !t.IsZero() { + span.StartTime = cfg.Timestamp() + } else { + span.StartTime = time.Now() + } + + return &telemetry.Traces{ + ResourceSpans: []*telemetry.ResourceSpans{ + { + ScopeSpans: []*telemetry.ScopeSpans{ + { + Scope: &telemetry.Scope{ + Name: t.name, + Version: t.version, + }, + Spans: []*telemetry.Span{span}, + SchemaURL: t.schemaURL, + }, + }, + }, + }, + }, span +} + +func spanKind(kind SpanKind) telemetry.SpanKind { + switch kind { + case SpanKindInternal: + return telemetry.SpanKindInternal + case SpanKindServer: + return telemetry.SpanKindServer + case SpanKindClient: + return telemetry.SpanKindClient + case SpanKindProducer: + return telemetry.SpanKindProducer + case SpanKindConsumer: + return telemetry.SpanKindConsumer + } + return telemetry.SpanKind(0) // undefined. +} + +type autoSpan struct { + embedded.Span + + spanContext SpanContext + sampled atomic.Bool + + mu sync.Mutex + traces *telemetry.Traces + span *telemetry.Span +} + +func (s *autoSpan) SpanContext() SpanContext { + if s == nil { + return SpanContext{} + } + // s.spanContext is immutable, do not acquire lock s.mu. 
+ return s.spanContext +} + +func (s *autoSpan) IsRecording() bool { + if s == nil { + return false + } + + return s.sampled.Load() +} + +func (s *autoSpan) SetStatus(c codes.Code, msg string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + if s.span.Status == nil { + s.span.Status = new(telemetry.Status) + } + + s.span.Status.Message = msg + + switch c { + case codes.Unset: + s.span.Status.Code = telemetry.StatusCodeUnset + case codes.Error: + s.span.Status.Code = telemetry.StatusCodeError + case codes.Ok: + s.span.Status.Code = telemetry.StatusCodeOK + } +} + +func (s *autoSpan) SetAttributes(attrs ...attribute.KeyValue) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + limit := maxSpan.Attrs + if limit == 0 { + // No attributes allowed. + n := int64(len(attrs)) + if n > 0 { + s.span.DroppedAttrs += uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked. + } + return + } + + m := make(map[string]int) + for i, a := range s.span.Attrs { + m[a.Key] = i + } + + for _, a := range attrs { + val := convAttrValue(a.Value) + if val.Empty() { + s.span.DroppedAttrs++ + continue + } + + if idx, ok := m[string(a.Key)]; ok { + s.span.Attrs[idx] = telemetry.Attr{ + Key: string(a.Key), + Value: val, + } + } else if limit < 0 || len(s.span.Attrs) < limit { + s.span.Attrs = append(s.span.Attrs, telemetry.Attr{ + Key: string(a.Key), + Value: val, + }) + m[string(a.Key)] = len(s.span.Attrs) - 1 + } else { + s.span.DroppedAttrs++ + } + } +} + +// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The +// number of dropped attributes is also returned. +func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { + n := len(attrs) + if limit == 0 { + var out uint32 + if n > 0 { + out = uint32(min(int64(n), math.MaxUint32)) // nolint: gosec // Bounds checked. + } + return nil, out + } + + if limit < 0 { + // Unlimited. + return convAttrs(attrs), 0 + } + + if n < 0 { + n = 0 + } + + limit = min(n, limit) + return convAttrs(attrs[:limit]), uint32(n - limit) // nolint: gosec // Bounds checked. +} + +func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { + if len(attrs) == 0 { + // Avoid allocations if not necessary. + return nil + } + + out := make([]telemetry.Attr, 0, len(attrs)) + for _, attr := range attrs { + key := string(attr.Key) + val := convAttrValue(attr.Value) + if val.Empty() { + continue + } + out = append(out, telemetry.Attr{Key: key, Value: val}) + } + return out +} + +func convAttrValue(value attribute.Value) telemetry.Value { + switch value.Type() { + case attribute.BOOL: + return telemetry.BoolValue(value.AsBool()) + case attribute.INT64: + return telemetry.Int64Value(value.AsInt64()) + case attribute.FLOAT64: + return telemetry.Float64Value(value.AsFloat64()) + case attribute.STRING: + v := truncate(maxSpan.AttrValueLen, value.AsString()) + return telemetry.StringValue(v) + case attribute.BOOLSLICE: + slice := value.AsBoolSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.BoolValue(v)) + } + return telemetry.SliceValue(out...) + case attribute.INT64SLICE: + slice := value.AsInt64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Int64Value(v)) + } + return telemetry.SliceValue(out...) 
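+	// Illustrative (hedged) example: the value of
+	// attribute.Int64Slice("codes", []int64{200, 404}) becomes
+	// telemetry.SliceValue(telemetry.Int64Value(200), telemetry.Int64Value(404)).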
+ case attribute.FLOAT64SLICE: + slice := value.AsFloat64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Float64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.STRINGSLICE: + slice := value.AsStringSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + v = truncate(maxSpan.AttrValueLen, v) + out = append(out, telemetry.StringValue(v)) + } + return telemetry.SliceValue(out...) + } + return telemetry.Value{} +} + +// truncate returns a truncated version of s such that it contains less than +// the limit number of characters. Truncation is applied by returning the limit +// number of valid characters contained in s. +// +// If limit is negative, it returns the original string. +// +// UTF-8 is supported. When truncating, all invalid characters are dropped +// before applying truncation. +// +// If s already contains less than the limit number of bytes, it is returned +// unchanged. No invalid characters are removed. +func truncate(limit int, s string) string { + // This prioritize performance in the following order based on the most + // common expected use-cases. + // + // - Short values less than the default limit (128). + // - Strings with valid encodings that exceed the limit. + // - No limit. + // - Strings with invalid encodings that exceed the limit. + if limit < 0 || len(s) <= limit { + return s + } + + // Optimistically, assume all valid UTF-8. + var b strings.Builder + count := 0 + for i, c := range s { + if c != utf8.RuneError { + count++ + if count > limit { + return s[:i] + } + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // Invalid encoding. + b.Grow(len(s) - 1) + _, _ = b.WriteString(s[:i]) + s = s[i:] + break + } + } + + // Fast-path, no invalid input. + if b.Cap() == 0 { + return s + } + + // Truncate while validating UTF-8. + for i := 0; i < len(s) && count < limit; { + c := s[i] + if c < utf8.RuneSelf { + // Optimization for single byte runes (common case). + _ = b.WriteByte(c) + i++ + count++ + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // We checked for all 1-byte runes above, this is a RuneError. + i++ + continue + } + + _, _ = b.WriteString(s[i : i+size]) + i += size + count++ + } + + return b.String() +} + +func (s *autoSpan) End(opts ...SpanEndOption) { + if s == nil || !s.sampled.Swap(false) { + return + } + + // s.end exists so the lock (s.mu) is not held while s.ended is called. + s.ended(s.end(opts)) +} + +func (s *autoSpan) end(opts []SpanEndOption) []byte { + s.mu.Lock() + defer s.mu.Unlock() + + cfg := NewSpanEndConfig(opts...) + if t := cfg.Timestamp(); !t.IsZero() { + s.span.EndTime = cfg.Timestamp() + } else { + s.span.EndTime = time.Now() + } + + b, _ := json.Marshal(s.traces) // TODO: do not ignore this error. + return b +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (*autoSpan) ended(buf []byte) { ended(buf) } + +// ended is used for testing. +var ended = func([]byte) {} + +func (s *autoSpan) RecordError(err error, opts ...EventOption) { + if s == nil || err == nil || !s.sampled.Load() { + return + } + + cfg := NewEventConfig(opts...) 
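+	// Hedged usage sketch: a caller such as
+	//
+	//	span.RecordError(err, WithStackTrace(true))
+	//
+	// reaches the cfg.StackTrace() branch below and records the stack.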
+ + attrs := cfg.Attributes() + attrs = append(attrs, + semconv.ExceptionType(typeStr(err)), + semconv.ExceptionMessage(err.Error()), + ) + if cfg.StackTrace() { + buf := make([]byte, 2048) + n := runtime.Stack(buf, false) + attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n]))) + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs) +} + +func typeStr(i any) string { + t := reflect.TypeOf(i) + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. + return t.String() + } + return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) +} + +func (s *autoSpan) AddEvent(name string, opts ...EventOption) { + if s == nil || !s.sampled.Load() { + return + } + + cfg := NewEventConfig(opts...) + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(name, cfg.Timestamp(), cfg.Attributes()) +} + +// addEvent adds an event with name and attrs at tStamp to the span. The span +// lock (s.mu) needs to be held by the caller. +func (s *autoSpan) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) { + limit := maxSpan.Events + + if limit == 0 { + s.span.DroppedEvents++ + return + } + + if limit > 0 && len(s.span.Events) == limit { + // Drop head while avoiding allocation of more capacity. + copy(s.span.Events[:limit-1], s.span.Events[1:]) + s.span.Events = s.span.Events[:limit-1] + s.span.DroppedEvents++ + } + + e := &telemetry.SpanEvent{Time: tStamp, Name: name} + e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs) + + s.span.Events = append(s.span.Events, e) +} + +func (s *autoSpan) AddLink(link Link) { + if s == nil || !s.sampled.Load() { + return + } + + l := maxSpan.Links + + s.mu.Lock() + defer s.mu.Unlock() + + if l == 0 { + s.span.DroppedLinks++ + return + } + + if l > 0 && len(s.span.Links) == l { + // Drop head while avoiding allocation of more capacity. + copy(s.span.Links[:l-1], s.span.Links[1:]) + s.span.Links = s.span.Links[:l-1] + s.span.DroppedLinks++ + } + + s.span.Links = append(s.span.Links, convLink(link)) +} + +func convLinks(links []Link) []*telemetry.SpanLink { + out := make([]*telemetry.SpanLink, 0, len(links)) + for _, link := range links { + out = append(out, convLink(link)) + } + return out +} + +func convLink(link Link) *telemetry.SpanLink { + l := &telemetry.SpanLink{ + TraceID: telemetry.TraceID(link.SpanContext.TraceID()), + SpanID: telemetry.SpanID(link.SpanContext.SpanID()), + TraceState: link.SpanContext.TraceState().String(), + Flags: uint32(link.SpanContext.TraceFlags()), + } + l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes) + + return l +} + +func (s *autoSpan) SetName(name string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.span.Name = name +} + +func (*autoSpan) TracerProvider() TracerProvider { return newAutoTracerProvider() } + +// maxSpan are the span limits resolved during startup. +var maxSpan = newSpanLimits() + +type spanLimits struct { + // Attrs is the number of allowed attributes for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the + // environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if + // that is not set, is used. + Attrs int + // AttrValueLen is the maximum attribute value length allowed for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. 
Otherwise, the
+	// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1
+	// if that is not set, is used.
+	AttrValueLen int
+	// Events is the number of allowed events for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set.
+	Events int
+	// EventAttrs is the number of allowed attributes for a span event.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set.
+	EventAttrs int
+	// Links is the number of allowed Links for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set.
+	Links int
+	// LinkAttrs is the number of allowed attributes for a span link.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set.
+	LinkAttrs int
+}
+
+func newSpanLimits() spanLimits {
+	return spanLimits{
+		Attrs: firstEnv(
+			128,
+			"OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT",
+			"OTEL_ATTRIBUTE_COUNT_LIMIT",
+		),
+		AttrValueLen: firstEnv(
+			-1, // Unlimited.
+			"OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+			"OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+		),
+		Events:     firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"),
+		EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"),
+		Links:      firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"),
+		LinkAttrs:  firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"),
+	}
+}
+
+// firstEnv returns the parsed integer value of the first matching environment
+// variable from keys. The defaultVal is returned if the value is not an
+// integer or no match is found.
+func firstEnv(defaultVal int, keys ...string) int {
+	for _, key := range keys {
+		strV := os.Getenv(key)
+		if strV == "" {
+			continue
+		}
+
+		v, err := strconv.Atoi(strV)
+		if err == nil {
+			return v
+		}
+		// Ignore invalid environment variable.
+	}
+
+	return defaultVal
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go
index 273d58e0014..9c0b720a4d6 100644
--- a/vendor/go.opentelemetry.io/otel/trace/config.go
+++ b/vendor/go.opentelemetry.io/otel/trace/config.go
@@ -213,7 +213,7 @@ var _ SpanStartEventOption = attributeOption{}
 
 // WithAttributes adds the attributes related to a span life-cycle event.
 // These attributes are used to describe the work a Span represents when this
-// option is provided to a Span's start or end events. Otherwise, these
+// option is provided to a Span's start event. Otherwise, these
 // attributes provide additional information about the event being recorded
 // (e.g. error, state change, processing progress, system event).
 //
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go
new file mode 100644
index 00000000000..f663547b4ee
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+// Attr is a key-value pair.
+type Attr struct {
+	Key   string `json:"key,omitempty"`
+	Value Value  `json:"value,omitempty"`
+}
+
+// String returns an Attr for a string value.
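+// For example (illustrative), String("k", "v") is
+// Attr{Key: "k", Value: StringValue("v")}.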
+func String(key, value string) Attr {
+	return Attr{key, StringValue(value)}
+}
+
+// Int64 returns an Attr for an int64 value.
+func Int64(key string, value int64) Attr {
+	return Attr{key, Int64Value(value)}
+}
+
+// Int returns an Attr for an int value.
+func Int(key string, value int) Attr {
+	return Int64(key, int64(value))
+}
+
+// Float64 returns an Attr for a float64 value.
+func Float64(key string, value float64) Attr {
+	return Attr{key, Float64Value(value)}
+}
+
+// Bool returns an Attr for a bool value.
+func Bool(key string, value bool) Attr {
+	return Attr{key, BoolValue(value)}
+}
+
+// Bytes returns an Attr for a []byte value.
+// The passed slice must not be changed after it is passed.
+func Bytes(key string, value []byte) Attr {
+	return Attr{key, BytesValue(value)}
+}
+
+// Slice returns an Attr for a []Value value.
+// The passed slice must not be changed after it is passed.
+func Slice(key string, value ...Value) Attr {
+	return Attr{key, SliceValue(value...)}
+}
+
+// Map returns an Attr for a map value.
+// The passed slice must not be changed after it is passed.
+func Map(key string, value ...Attr) Attr {
+	return Attr{key, MapValue(value...)}
+}
+
+// Equal reports whether a is equal to b.
+func (a Attr) Equal(b Attr) bool {
+	return a.Key == b.Key && a.Value.Equal(b.Value)
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go
new file mode 100644
index 00000000000..5debe90bbb1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go
@@ -0,0 +1,8 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package telemetry provides lightweight representations of OpenTelemetry
+telemetry that are compatible with the OTLP JSON protobuf encoding.
+*/
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go
new file mode 100644
index 00000000000..7b1ae3c4ea8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go
@@ -0,0 +1,103 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+	"encoding/hex"
+	"errors"
+	"fmt"
+)
+
+const (
+	traceIDSize = 16
+	spanIDSize  = 8
+)
+
+// TraceID is a custom data type that is used for all trace IDs.
+type TraceID [traceIDSize]byte
+
+// String returns the hex string representation form of a TraceID.
+func (tid TraceID) String() string {
+	return hex.EncodeToString(tid[:])
+}
+
+// IsEmpty returns true if the trace ID contains no non-zero bytes.
+func (tid TraceID) IsEmpty() bool {
+	return tid == [traceIDSize]byte{}
+}
+
+// MarshalJSON converts the trace ID into a hex string enclosed in quotes.
+func (tid TraceID) MarshalJSON() ([]byte, error) {
+	if tid.IsEmpty() {
+		return []byte(`""`), nil
+	}
+	return marshalJSON(tid[:])
+}
+
+// UnmarshalJSON inflates the trace ID from a hex string, possibly enclosed in
+// quotes.
+func (tid *TraceID) UnmarshalJSON(data []byte) error {
+	*tid = [traceIDSize]byte{}
+	return unmarshalJSON(tid[:], data)
+}
+
+// SpanID is a custom data type that is used for all span IDs.
+type SpanID [spanIDSize]byte
+
+// String returns the hex string representation form of a SpanID.
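+// For example (illustrative), the all-zero SpanID renders as
+// "0000000000000000".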
+func (sid SpanID) String() string {
+	return hex.EncodeToString(sid[:])
+}
+
+// IsEmpty returns true if the span ID contains no non-zero bytes.
+func (sid SpanID) IsEmpty() bool {
+	return sid == [spanIDSize]byte{}
+}
+
+// MarshalJSON converts the span ID into a hex string enclosed in quotes.
+func (sid SpanID) MarshalJSON() ([]byte, error) {
+	if sid.IsEmpty() {
+		return []byte(`""`), nil
+	}
+	return marshalJSON(sid[:])
+}
+
+// UnmarshalJSON decodes the span ID from a hex string, possibly enclosed in
+// quotes.
+func (sid *SpanID) UnmarshalJSON(data []byte) error {
+	*sid = [spanIDSize]byte{}
+	return unmarshalJSON(sid[:], data)
+}
+
+// marshalJSON converts id into a hex string enclosed in quotes.
+func marshalJSON(id []byte) ([]byte, error) {
+	// Plus 2 quote chars at the start and end.
+	hexLen := hex.EncodedLen(len(id)) + 2
+
+	b := make([]byte, hexLen)
+	hex.Encode(b[1:hexLen-1], id)
+	b[0], b[hexLen-1] = '"', '"'
+
+	return b, nil
+}
+
+// unmarshalJSON inflates a trace or span ID from a hex string, possibly
+// enclosed in quotes.
+func unmarshalJSON(dst []byte, src []byte) error {
+	if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' {
+		src = src[1 : l-1]
+	}
+	nLen := len(src)
+	if nLen == 0 {
+		return nil
+	}
+
+	if len(dst) != hex.DecodedLen(nLen) {
+		return errors.New("invalid length for ID")
+	}
+
+	_, err := hex.Decode(dst, src)
+	if err != nil {
+		return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err)
+	}
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go
new file mode 100644
index 00000000000..f5e3a8cec9d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+	"encoding/json"
+	"strconv"
+)
+
+// protoInt64 represents the protobuf encoding of integers which can be either
+// strings or integers.
+type protoInt64 int64
+
+// Int64 returns the protoInt64 as an int64.
+func (i *protoInt64) Int64() int64 { return int64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoInt64) UnmarshalJSON(data []byte) error {
+	if data[0] == '"' {
+		var str string
+		if err := json.Unmarshal(data, &str); err != nil {
+			return err
+		}
+		parsedInt, err := strconv.ParseInt(str, 10, 64)
+		if err != nil {
+			return err
+		}
+		*i = protoInt64(parsedInt)
+	} else {
+		var parsedInt int64
+		if err := json.Unmarshal(data, &parsedInt); err != nil {
+			return err
+		}
+		*i = protoInt64(parsedInt)
+	}
+	return nil
+}
+
+// protoUint64 represents the protobuf encoding of integers which can be either
+// strings or integers.
+type protoUint64 uint64
+
+// Uint64 returns the protoUint64 as a uint64.
+func (i *protoUint64) Uint64() uint64 { return uint64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
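+// For example (illustrative), the JSON payloads 42 and "42" both decode to
+// protoUint64(42).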
+func (i *protoUint64) UnmarshalJSON(data []byte) error {
+	if data[0] == '"' {
+		var str string
+		if err := json.Unmarshal(data, &str); err != nil {
+			return err
+		}
+		parsedUint, err := strconv.ParseUint(str, 10, 64)
+		if err != nil {
+			return err
+		}
+		*i = protoUint64(parsedUint)
+	} else {
+		var parsedUint uint64
+		if err := json.Unmarshal(data, &parsedUint); err != nil {
+			return err
+		}
+		*i = protoUint64(parsedUint)
+	}
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go
new file mode 100644
index 00000000000..1798a702d4b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// Resource information.
+type Resource struct {
+	// Attrs are the set of attributes that describe the resource. Attribute
+	// keys MUST be unique (it is not allowed to have more than one attribute
+	// with the same key).
+	Attrs []Attr `json:"attributes,omitempty"`
+	// DroppedAttrs is the number of dropped attributes. If the value
+	// is 0, then no attributes were dropped.
+	DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r.
+func (r *Resource) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid Resource type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid Resource field: %#v", keyIface)
+		}
+
+		switch key {
+		case "attributes":
+			err = decoder.Decode(&r.Attrs)
+		case "droppedAttributesCount", "dropped_attributes_count":
+			err = decoder.Decode(&r.DroppedAttrs)
+		default:
+			// Skip unknown.
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go
new file mode 100644
index 00000000000..c2b4c635b7a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// Scope is the identifying values of the instrumentation scope.
+type Scope struct {
+	Name         string `json:"name,omitempty"`
+	Version      string `json:"version,omitempty"`
+	Attrs        []Attr `json:"attributes,omitempty"`
+	DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s.
+func (s *Scope) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid Scope type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Scope field: %#v", keyIface) + } + + switch key { + case "name": + err = decoder.Decode(&s.Name) + case "version": + err = decoder.Decode(&s.Version) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go new file mode 100644 index 00000000000..3c5e1cdb1b3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go @@ -0,0 +1,460 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "time" +) + +// A Span represents a single operation performed by a single component of the +// system. +type Span struct { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + SpanID SpanID `json:"spanId,omitempty"` + // trace_state conveys information about request position in multiple distributed tracing graphs. + // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header + // See also https://github.com/w3c/distributed-tracing for more details about this field. + TraceState string `json:"traceState,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + ParentSpanID SpanID `json:"parentSpanId,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether a span's parent + // is remote. The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // When creating span messages, if the message is logically forwarded from another source + // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD + // be copied as-is. If creating from a source that does not have an equivalent flags field + // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST + // be set to zero. 
+ // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // Empty value is equivalent to an unknown span name. + // + // This field is required. + Name string `json:"name"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + Kind SpanKind `json:"kind,omitempty"` + // start_time_unix_nano is the start time of the span. On the client side, this is the time + // kept by the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + StartTime time.Time `json:"startTimeUnixNano,omitempty"` + // end_time_unix_nano is the end time of the span. On the client side, this is the time + // kept by the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + EndTime time.Time `json:"endTimeUnixNano,omitempty"` + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "example.com/myattribute": true + // "example.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // events is a collection of Event items. + Events []*SpanEvent `json:"events,omitempty"` + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. + DroppedEvents uint32 `json:"droppedEventsCount,omitempty"` + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + Links []*SpanLink `json:"links,omitempty"` + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. 
If this value is 0, then no links were dropped. + DroppedLinks uint32 `json:"droppedLinksCount,omitempty"` + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + Status *Status `json:"status,omitempty"` +} + +// MarshalJSON encodes s into OTLP formatted JSON. +func (s Span) MarshalJSON() ([]byte, error) { + startT := s.StartTime.UnixNano() + if s.StartTime.IsZero() || startT < 0 { + startT = 0 + } + + endT := s.EndTime.UnixNano() + if s.EndTime.IsZero() || endT < 0 { + endT = 0 + } + + // Override non-empty default SpanID marshal and omitempty. + var parentSpanId string + if !s.ParentSpanID.IsEmpty() { + b := make([]byte, hex.EncodedLen(spanIDSize)) + hex.Encode(b, s.ParentSpanID[:]) + parentSpanId = string(b) + } + + type Alias Span + return json.Marshal(struct { + Alias + ParentSpanID string `json:"parentSpanId,omitempty"` + StartTime uint64 `json:"startTimeUnixNano,omitempty"` + EndTime uint64 `json:"endTimeUnixNano,omitempty"` + }{ + Alias: Alias(s), + ParentSpanID: parentSpanId, + StartTime: uint64(startT), // nolint:gosec // >0 checked above. + EndTime: uint64(endT), // nolint:gosec // >0 checked above. + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s. +func (s *Span) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Span type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Span field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&s.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&s.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&s.TraceState) + case "parentSpanId", "parent_span_id": + err = decoder.Decode(&s.ParentSpanID) + case "flags": + err = decoder.Decode(&s.Flags) + case "name": + err = decoder.Decode(&s.Name) + case "kind": + err = decoder.Decode(&s.Kind) + case "startTimeUnixNano", "start_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked. + s.StartTime = time.Unix(0, v) + case "endTimeUnixNano", "end_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked. + s.EndTime = time.Unix(0, v) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + case "events": + err = decoder.Decode(&s.Events) + case "droppedEventsCount", "dropped_events_count": + err = decoder.Decode(&s.DroppedEvents) + case "links": + err = decoder.Decode(&s.Links) + case "droppedLinksCount", "dropped_links_count": + err = decoder.Decode(&s.DroppedLinks) + case "status": + err = decoder.Decode(&s.Status) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// SpanFlags represents constants used to interpret the +// Span.flags field, which is protobuf 'fixed32' type and is to +// be used as bit-fields. Each non-zero value defined in this enum is +// a bit-mask. 
To extract the bit-field, for example, use an
+// expression like:
+//
+//	(span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK)
+//
+// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+//
+// Note that Span flags were introduced in version 1.1 of the
+// OpenTelemetry protocol. Older Span producers do not set this
+// field, consequently consumers should not rely on the absence of a
+// particular flag bit to indicate the presence of a particular feature.
+type SpanFlags int32
+
+const (
+	// Bits 0-7 are used for trace flags.
+	SpanFlagsTraceFlagsMask SpanFlags = 255
+	// Bits 8 and 9 are used to indicate that the parent span or link span is remote.
+	// Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
+	// Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
+	SpanFlagsContextHasIsRemoteMask SpanFlags = 256
+	// SpanFlagsContextIsRemoteMask indicates the Span is remote.
+	SpanFlagsContextIsRemoteMask SpanFlags = 512
+)
+
+// SpanKind is the type of span. Can be used to specify additional relationships between spans
+// in addition to a parent/child relationship.
+type SpanKind int32
+
+const (
+	// Indicates that the span represents an internal operation within an application,
+	// as opposed to an operation happening at the boundaries. Default value.
+	SpanKindInternal SpanKind = 1
+	// Indicates that the span covers server-side handling of an RPC or other
+	// remote network request.
+	SpanKindServer SpanKind = 2
+	// Indicates that the span describes a request to some remote service.
+	SpanKindClient SpanKind = 3
+	// Indicates that the span describes a producer sending a message to a broker.
+	// Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
+	// between producer and consumer spans. A PRODUCER span ends when the message was accepted
+	// by the broker while the logical processing of the message might span a much longer time.
+	SpanKindProducer SpanKind = 4
+	// Indicates that the span describes a consumer receiving a message from a broker.
+	// Like the PRODUCER kind, there is often no direct critical path latency relationship
+	// between producer and consumer spans.
+	SpanKindConsumer SpanKind = 5
+)
+
+// SpanEvent is a time-stamped annotation of the span, consisting of a
+// user-supplied text description and key-value pairs.
+type SpanEvent struct {
+	// time_unix_nano is the time the event occurred.
+	Time time.Time `json:"timeUnixNano,omitempty"`
+	// name of the event.
+	// This field is semantically required to be set to non-empty string.
+	Name string `json:"name,omitempty"`
+	// attributes is a collection of attribute key/value pairs on the event.
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attrs []Attr `json:"attributes,omitempty"`
+	// dropped_attributes_count is the number of dropped attributes. If the value is 0,
+	// then no attributes were dropped.
+	DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// MarshalJSON encodes e into OTLP formatted JSON.
+func (e SpanEvent) MarshalJSON() ([]byte, error) {
+	t := e.Time.UnixNano()
+	if e.Time.IsZero() || t < 0 {
+		t = 0
+	}
+
+	type Alias SpanEvent
+	return json.Marshal(struct {
+		Alias
+		Time uint64 `json:"timeUnixNano,omitempty"`
+	}{
+		Alias: Alias(e),
+		Time:  uint64(t), // nolint: gosec // >0 checked above
+	})
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se.
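+// Both lowerCamelCase and snake_case keys are accepted; for example
+// (illustrative), {"timeUnixNano":"1","name":"ev"} and
+// {"time_unix_nano":"1","name":"ev"} decode identically.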
+func (se *SpanEvent) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanEvent type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanEvent field: %#v", keyIface) + } + + switch key { + case "timeUnixNano", "time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked. + se.Time = time.Unix(0, v) + case "name": + err = decoder.Decode(&se.Name) + case "attributes": + err = decoder.Decode(&se.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&se.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type SpanLink struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanID SpanID `json:"spanId,omitempty"` + // The trace_state associated with the link. + TraceState string `json:"traceState,omitempty"` + // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether the link is remote. + // The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl. +func (sl *SpanLink) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanLink type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanLink field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&sl.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&sl.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&sl.TraceState) + case "attributes": + err = decoder.Decode(&sl.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&sl.DroppedAttrs) + case "flags": + err = decoder.Decode(&sl.Flags) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go new file mode 100644 index 00000000000..1d013a8fa80 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +// For the semantics of status codes see +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status +type StatusCode int32 + +const ( + // The default status. + StatusCodeUnset StatusCode = 0 + // The Span has been validated by an Application developer or Operator to + // have completed successfully. + StatusCodeOK StatusCode = 1 + // The Span contains an error. + StatusCodeError StatusCode = 2 +) + +var statusCodeStrings = []string{ + "Unset", + "OK", + "Error", +} + +func (s StatusCode) String() string { + if s >= 0 && int(s) < len(statusCodeStrings) { + return statusCodeStrings[s] + } + return "" +} + +// The Status type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +type Status struct { + // A developer-facing human readable error message. + Message string `json:"message,omitempty"` + // The status code. + Code StatusCode `json:"code,omitempty"` +} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go new file mode 100644 index 00000000000..b0394070814 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go @@ -0,0 +1,189 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Traces represents the traces data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP traces data but do +// not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type Traces struct { + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. 
+ ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td. +func (td *Traces) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid TracesData type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid TracesData field: %#v", keyIface) + } + + switch key { + case "resourceSpans", "resource_spans": + err = decoder.Decode(&td.ResourceSpans) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of ScopeSpans from a Resource. +type ResourceSpans struct { + // The resource for the spans in this message. + // If this field is not set then no resource info is known. + Resource Resource `json:"resource"` + // A list of ScopeSpans that originate from a resource. + ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"` + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_spans" field which have their own schema_url field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs. +func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ResourceSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface) + } + + switch key { + case "resource": + err = decoder.Decode(&rs.Resource) + case "scopeSpans", "scope_spans": + err = decoder.Decode(&rs.ScopeSpans) + case "schemaUrl", "schema_url": + err = decoder.Decode(&rs.SchemaURL) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of Spans produced by an InstrumentationScope. +type ScopeSpans struct { + // The instrumentation scope information for the spans in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope *Scope `json:"scope"` + // A list of Spans that originate from an instrumentation scope. + Spans []*Span `json:"spans,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the span data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all spans and span events in the "spans" field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss. 
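+// Both lowerCamelCase and snake_case keys are accepted for each field, per
+// the OTLP/JSON encoding; for example (editorial note), "schemaUrl" and
+// "schema_url" both decode into SchemaURL.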
+func (ss *ScopeSpans) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid ScopeSpans type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface)
+		}
+
+		switch key {
+		case "scope":
+			err = decoder.Decode(&ss.Scope)
+		case "spans":
+			err = decoder.Decode(&ss.Spans)
+		case "schemaUrl", "schema_url":
+			err = decoder.Decode(&ss.SchemaURL)
+		default:
+			// Skip unknown.
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go
new file mode 100644
index 00000000000..7251492da06
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go
@@ -0,0 +1,453 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+	"bytes"
+	"cmp"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"slices"
+	"strconv"
+	"unsafe"
+)
+
+// A Value represents a structured value.
+// A zero value is valid and represents an empty value.
+type Value struct {
+	// Ensure forward compatibility by explicitly making this not comparable.
+	noCmp [0]func() //nolint: unused // This is indeed used.
+
+	// num holds the value for Int64, Float64, and Bool. It holds the length
+	// for String, Bytes, Slice, Map.
+	num uint64
+	// any holds either the ValueKindBool, ValueKindInt64, ValueKindFloat64,
+	// stringptr, bytesptr, sliceptr, or mapptr. If ValueKindBool,
+	// ValueKindInt64, or ValueKindFloat64 then the value of Value is in num
+	// as described above. Otherwise, it contains the value wrapped in the
+	// appropriate type.
+	any any
+}
+
+type (
+	// stringptr represents a value in Value.any for ValueKindString Values.
+	stringptr *byte
+	// bytesptr represents a value in Value.any for ValueKindBytes Values.
+	bytesptr *byte
+	// sliceptr represents a value in Value.any for ValueKindSlice Values.
+	sliceptr *Value
+	// mapptr represents a value in Value.any for ValueKindMap Values.
+	mapptr *Attr
+)
+
+// ValueKind is the kind of a [Value].
+type ValueKind int
+
+// ValueKind values.
+const (
+	ValueKindEmpty ValueKind = iota
+	ValueKindBool
+	ValueKindFloat64
+	ValueKindInt64
+	ValueKindString
+	ValueKindBytes
+	ValueKindSlice
+	ValueKindMap
+)
+
+var valueKindStrings = []string{
+	"Empty",
+	"Bool",
+	"Float64",
+	"Int64",
+	"String",
+	"Bytes",
+	"Slice",
+	"Map",
+}
+
+func (k ValueKind) String() string {
+	if k >= 0 && int(k) < len(valueKindStrings) {
+		return valueKindStrings[k]
+	}
+	return "<unknown telemetry.ValueKind>"
+}
+
+// StringValue returns a new [Value] for a string.
+func StringValue(v string) Value {
+	return Value{
+		num: uint64(len(v)),
+		any: stringptr(unsafe.StringData(v)),
+	}
+}
+
+// IntValue returns a [Value] for an int.
+func IntValue(v int) Value { return Int64Value(int64(v)) }
+
+// Int64Value returns a [Value] for an int64.
+func Int64Value(v int64) Value {
+	return Value{
+		num: uint64(v), // nolint: gosec // Store raw bytes.
+		any: ValueKindInt64,
+	}
+}
+
+// Float64Value returns a [Value] for a float64.
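+// The float's bits are stored in Value.num via math.Float64bits, so the
+// round trip Float64Value(1.5).AsFloat64() == 1.5 holds (editorial note).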
+func Float64Value(v float64) Value {
+	return Value{num: math.Float64bits(v), any: ValueKindFloat64}
+}
+
+// BoolValue returns a [Value] for a bool.
+func BoolValue(v bool) Value { //nolint:revive // Not a control flag.
+	var n uint64
+	if v {
+		n = 1
+	}
+	return Value{num: n, any: ValueKindBool}
+}
+
+// BytesValue returns a [Value] for a byte slice. The passed slice must not be
+// changed after it is passed.
+func BytesValue(v []byte) Value {
+	return Value{
+		num: uint64(len(v)),
+		any: bytesptr(unsafe.SliceData(v)),
+	}
+}
+
+// SliceValue returns a [Value] for a slice of [Value]. The passed slice must
+// not be changed after it is passed.
+func SliceValue(vs ...Value) Value {
+	return Value{
+		num: uint64(len(vs)),
+		any: sliceptr(unsafe.SliceData(vs)),
+	}
+}
+
+// MapValue returns a new [Value] for a slice of key-value pairs. The passed
+// slice must not be changed after it is passed.
+func MapValue(kvs ...Attr) Value {
+	return Value{
+		num: uint64(len(kvs)),
+		any: mapptr(unsafe.SliceData(kvs)),
+	}
+}
+
+// AsString returns the value held by v as a string.
+func (v Value) AsString() string {
+	if sp, ok := v.any.(stringptr); ok {
+		return unsafe.String(sp, v.num)
+	}
+	// TODO: error handle
+	return ""
+}
+
+// asString returns the value held by v as a string. It will panic if the
+// Value is not ValueKindString.
+func (v Value) asString() string {
+	return unsafe.String(v.any.(stringptr), v.num)
+}
+
+// AsInt64 returns the value held by v as an int64.
+func (v Value) AsInt64() int64 {
+	if v.Kind() != ValueKindInt64 {
+		// TODO: error handle
+		return 0
+	}
+	return v.asInt64()
+}
+
+// asInt64 returns the value held by v as an int64. If v is not of
+// ValueKindInt64, this will return garbage.
+func (v Value) asInt64() int64 {
+	// Assumes v.num was a valid int64 (overflow not checked).
+	return int64(v.num) // nolint: gosec
+}
+
+// AsBool returns the value held by v as a bool.
+func (v Value) AsBool() bool {
+	if v.Kind() != ValueKindBool {
+		// TODO: error handle
+		return false
+	}
+	return v.asBool()
+}
+
+// asBool returns the value held by v as a bool. If v is not of
+// ValueKindBool, this will return garbage.
+func (v Value) asBool() bool { return v.num == 1 }
+
+// AsFloat64 returns the value held by v as a float64.
+func (v Value) AsFloat64() float64 {
+	if v.Kind() != ValueKindFloat64 {
+		// TODO: error handle
+		return 0
+	}
+	return v.asFloat64()
+}
+
+// asFloat64 returns the value held by v as a float64. If v is not of
+// ValueKindFloat64, this will return garbage.
+func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) }
+
+// AsBytes returns the value held by v as a []byte.
+func (v Value) AsBytes() []byte {
+	if sp, ok := v.any.(bytesptr); ok {
+		return unsafe.Slice((*byte)(sp), v.num)
+	}
+	// TODO: error handle
+	return nil
+}
+
+// asBytes returns the value held by v as a []byte. It will panic if the
+// Value is not ValueKindBytes.
+func (v Value) asBytes() []byte {
+	return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num)
+}
+
+// AsSlice returns the value held by v as a []Value.
+func (v Value) AsSlice() []Value {
+	if sp, ok := v.any.(sliceptr); ok {
+		return unsafe.Slice((*Value)(sp), v.num)
+	}
+	// TODO: error handle
+	return nil
+}
+
+// asSlice returns the value held by v as a []Value. It will panic if the
+// Value is not ValueKindSlice.
+func (v Value) asSlice() []Value {
+	return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num)
+}
+
+// AsMap returns the value held by v as a []Attr.
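+// Usage sketch (editorial; assumes Attr is this package's key-value pair
+// type with fields Key and Value):
+//
+//	m := MapValue(Attr{Key: "k", Value: StringValue("v")})
+//	kvs := m.AsMap() // len(kvs) == 1 && kvs[0].Key == "k"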
+func (v Value) AsMap() []Attr {
+	if sp, ok := v.any.(mapptr); ok {
+		return unsafe.Slice((*Attr)(sp), v.num)
+	}
+	// TODO: error handle
+	return nil
+}
+
+// asMap returns the value held by v as a []Attr. It will panic if the
+// Value is not ValueKindMap.
+func (v Value) asMap() []Attr {
+	return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num)
+}
+
+// Kind returns the Kind of v.
+func (v Value) Kind() ValueKind {
+	switch x := v.any.(type) {
+	case ValueKind:
+		return x
+	case stringptr:
+		return ValueKindString
+	case bytesptr:
+		return ValueKindBytes
+	case sliceptr:
+		return ValueKindSlice
+	case mapptr:
+		return ValueKindMap
+	default:
+		return ValueKindEmpty
+	}
+}
+
+// Empty reports whether v does not hold any value.
+func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty }
+
+// Equal reports whether v is equal to w.
+func (v Value) Equal(w Value) bool {
+	k1 := v.Kind()
+	k2 := w.Kind()
+	if k1 != k2 {
+		return false
+	}
+	switch k1 {
+	case ValueKindInt64, ValueKindBool:
+		return v.num == w.num
+	case ValueKindString:
+		return v.asString() == w.asString()
+	case ValueKindFloat64:
+		return v.asFloat64() == w.asFloat64()
+	case ValueKindSlice:
+		return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal)
+	case ValueKindMap:
+		sv := sortMap(v.asMap())
+		sw := sortMap(w.asMap())
+		return slices.EqualFunc(sv, sw, Attr.Equal)
+	case ValueKindBytes:
+		return bytes.Equal(v.asBytes(), w.asBytes())
+	case ValueKindEmpty:
+		return true
+	default:
+		// TODO: error handle
+		return false
+	}
+}
+
+func sortMap(m []Attr) []Attr {
+	sm := make([]Attr, len(m))
+	copy(sm, m)
+	slices.SortFunc(sm, func(a, b Attr) int {
+		return cmp.Compare(a.Key, b.Key)
+	})
+
+	return sm
+}
+
+// String returns Value's value as a string, formatted like [fmt.Sprint].
+//
+// The returned string is meant for debugging;
+// the string representation is not stable.
+func (v Value) String() string {
+	switch v.Kind() {
+	case ValueKindString:
+		return v.asString()
+	case ValueKindInt64:
+		// Assumes v.num was a valid int64 (overflow not checked).
+		return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
+	case ValueKindFloat64:
+		return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
+	case ValueKindBool:
+		return strconv.FormatBool(v.asBool())
+	case ValueKindBytes:
+		return fmt.Sprint(v.asBytes())
+	case ValueKindMap:
+		return fmt.Sprint(v.asMap())
+	case ValueKindSlice:
+		return fmt.Sprint(v.asSlice())
+	case ValueKindEmpty:
+		return ""
+	default:
+		// Try to handle this as gracefully as possible.
+		//
+		// Don't panic here. The goal here is to have developers find this
+		// first if a slog.Kind is not handled. It is preferable to have
+		// users open issues asking why their attributes have an
+		// "unhandled: " prefix than to have their code panic.
+		return fmt.Sprintf("<unhandled: %s>", v.Kind())
+	}
+}
+
+// MarshalJSON encodes v into OTLP formatted JSON.
+func (v *Value) MarshalJSON() ([]byte, error) {
+	switch v.Kind() {
+	case ValueKindString:
+		return json.Marshal(struct {
+			Value string `json:"stringValue"`
+		}{v.asString()})
+	case ValueKindInt64:
+		return json.Marshal(struct {
+			Value string `json:"intValue"`
+		}{strconv.FormatInt(int64(v.num), 10)}) // nolint: gosec // From raw bytes.
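+	// Note (editorial): OTLP/JSON carries 64-bit integers as decimal
+	// strings, which is why intValue above goes through strconv.FormatInt,
+	// e.g. Int64Value(1) marshals to {"intValue":"1"}, while doubleValue
+	// below stays a JSON number, e.g. {"doubleValue":1.5}.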
+	case ValueKindFloat64:
+		return json.Marshal(struct {
+			Value float64 `json:"doubleValue"`
+		}{v.asFloat64()})
+	case ValueKindBool:
+		return json.Marshal(struct {
+			Value bool `json:"boolValue"`
+		}{v.asBool()})
+	case ValueKindBytes:
+		return json.Marshal(struct {
+			Value []byte `json:"bytesValue"`
+		}{v.asBytes()})
+	case ValueKindMap:
+		return json.Marshal(struct {
+			Value struct {
+				Values []Attr `json:"values"`
+			} `json:"kvlistValue"`
+		}{struct {
+			Values []Attr `json:"values"`
+		}{v.asMap()}})
+	case ValueKindSlice:
+		return json.Marshal(struct {
+			Value struct {
+				Values []Value `json:"values"`
+			} `json:"arrayValue"`
+		}{struct {
+			Values []Value `json:"values"`
+		}{v.asSlice()}})
+	case ValueKindEmpty:
+		return nil, nil
+	default:
+		return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String())
+	}
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v.
+func (v *Value) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid Value type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid Value key: %#v", keyIface)
+		}
+
+		switch key {
+		case "stringValue", "string_value":
+			var val string
+			err = decoder.Decode(&val)
+			*v = StringValue(val)
+		case "boolValue", "bool_value":
+			var val bool
+			err = decoder.Decode(&val)
+			*v = BoolValue(val)
+		case "intValue", "int_value":
+			var val protoInt64
+			err = decoder.Decode(&val)
+			*v = Int64Value(val.Int64())
+		case "doubleValue", "double_value":
+			var val float64
+			err = decoder.Decode(&val)
+			*v = Float64Value(val)
+		case "bytesValue", "bytes_value":
+			var val64 string
+			if err := decoder.Decode(&val64); err != nil {
+				return err
+			}
+			var val []byte
+			val, err = base64.StdEncoding.DecodeString(val64)
+			*v = BytesValue(val)
+		case "arrayValue", "array_value":
+			var val struct{ Values []Value }
+			err = decoder.Decode(&val)
+			*v = SliceValue(val.Values...)
+		case "kvlistValue", "kvlist_value":
+			var val struct{ Values []Attr }
+			err = decoder.Decode(&val)
+			*v = MapValue(val.Values...)
+		default:
+			// Skip unknown.
+			continue
+		}
+		// Use first valid. Ignore the rest.
+		return err
+	}
+
+	// Only unknown fields. Return nil without unmarshaling any value.
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go
index ca20e9997ab..c8b1ae5d67e 100644
--- a/vendor/go.opentelemetry.io/otel/trace/noop.go
+++ b/vendor/go.opentelemetry.io/otel/trace/noop.go
@@ -82,4 +82,22 @@ func (noopSpan) AddLink(Link) {}
 func (noopSpan) SetName(string) {}
 
 // TracerProvider returns a no-op TracerProvider.
-func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} }
+func (s noopSpan) TracerProvider() TracerProvider {
+	return s.tracerProvider(autoInstEnabled)
+}
+
+// autoInstEnabled defines if the auto-instrumentation SDK is enabled.
+//
+// The auto-instrumentation is expected to overwrite this value to true when it
+// attaches to the process.
+var autoInstEnabled = new(bool)
+
+// tracerProvider returns a noopTracerProvider if autoEnabled is false,
+// otherwise it will return a TracerProvider from the sdk package used in
+// auto-instrumentation.
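+//
+// For example (editorial note): while *autoInstEnabled remains false,
+// noopSpan.TracerProvider() yields the no-op provider; an attached
+// auto-instrumentation runtime is expected to set the flag to true first.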
+func (noopSpan) tracerProvider(autoEnabled *bool) TracerProvider {
+	if *autoEnabled {
+		return newAutoTracerProvider()
+	}
+	return noopTracerProvider{}
+}
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
index 59e24816137..d5fa71f6745 100644
--- a/vendor/go.opentelemetry.io/otel/version.go
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel"
 
 // Version is the current release version of OpenTelemetry in use.
 func Version() string {
-	return "1.32.0"
+	return "1.35.0"
 }
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
index c04b12f6b74..2b4cb4b4187 100644
--- a/vendor/go.opentelemetry.io/otel/versions.yaml
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -3,7 +3,7 @@
 module-sets:
   stable-v1:
-    version: v1.32.0
+    version: v1.35.0
     modules:
       - go.opentelemetry.io/otel
       - go.opentelemetry.io/otel/bridge/opencensus
@@ -23,11 +23,11 @@
       - go.opentelemetry.io/otel/sdk/metric
       - go.opentelemetry.io/otel/trace
   experimental-metrics:
-    version: v0.54.0
+    version: v0.57.0
     modules:
       - go.opentelemetry.io/otel/exporters/prometheus
   experimental-logs:
-    version: v0.8.0
+    version: v0.11.0
    modules:
       - go.opentelemetry.io/otel/log
       - go.opentelemetry.io/otel/sdk/log
       - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
       - go.opentelemetry.io/otel/exporters/stdout/stdoutlog
@@ -35,8 +35,9 @@
   experimental-schema:
-    version: v0.0.11
+    version: v0.0.12
     modules:
       - go.opentelemetry.io/otel/schema
 excluded-modules:
   - go.opentelemetry.io/otel/internal/tools
+  - go.opentelemetry.io/otel/trace/internal/telemetry/test
diff --git a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go
index d7099c35bc4..b342a0a9401 100644
--- a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go
+++ b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go
@@ -311,7 +311,8 @@ type ResourceSpans struct {
 	// A list of ScopeSpans that originate from a resource.
 	ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"`
 	// The Schema URL, if known. This is the identifier of the Schema that the resource data
-	// is recorded in. To learn more about Schema URL see
+	// is recorded in. Notably, the last part of the URL path is the version number of the
+	// schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see
 	// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
 	// This schema_url applies to the data in the "resource" field. It does not apply
 	// to the data in the "scope_spans" field which have their own schema_url field.
@@ -384,7 +385,8 @@ type ScopeSpans struct {
 	// A list of Spans that originate from an instrumentation scope.
 	Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"`
 	// The Schema URL, if known. This is the identifier of the Schema that the span data
-	// is recorded in. To learn more about Schema URL see
+	// is recorded in. Notably, the last part of the URL path is the version number of the
+	// schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see
 	// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
 	// This schema_url applies to all spans and span events in the "spans" field.
SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 09f6a49b80a..74f052aa9fa 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -56,7 +56,7 @@ type Config struct { // the OAuth flow, after the resource owner's URLs. RedirectURL string - // Scope specifies optional requested permissions. + // Scopes specifies optional requested permissions. Scopes []string // authStyleCache caches which auth style to use when Endpoint.AuthStyle is diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index aa69fb4d509..db7806cb994 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -180,6 +180,8 @@ type CommonLanguageSettings struct { ReferenceDocsUri string `protobuf:"bytes,1,opt,name=reference_docs_uri,json=referenceDocsUri,proto3" json:"reference_docs_uri,omitempty"` // The destination where API teams want this client library to be published. Destinations []ClientLibraryDestination `protobuf:"varint,2,rep,packed,name=destinations,proto3,enum=google.api.ClientLibraryDestination" json:"destinations,omitempty"` + // Configuration for which RPCs should be generated in the GAPIC client. + SelectiveGapicGeneration *SelectiveGapicGeneration `protobuf:"bytes,3,opt,name=selective_gapic_generation,json=selectiveGapicGeneration,proto3" json:"selective_gapic_generation,omitempty"` } func (x *CommonLanguageSettings) Reset() { @@ -229,6 +231,13 @@ func (x *CommonLanguageSettings) GetDestinations() []ClientLibraryDestination { return nil } +func (x *CommonLanguageSettings) GetSelectiveGapicGeneration() *SelectiveGapicGeneration { + if x != nil { + return x.SelectiveGapicGeneration + } + return nil +} + // Details about how and where to publish client libraries. type ClientLibrarySettings struct { state protoimpl.MessageState @@ -984,6 +993,16 @@ type GoSettings struct { // Some settings. Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` + // Map of service names to renamed services. Keys are the package relative + // service names and values are the name to be used for the service client + // and call options. + // + // publishing: + // + // go_settings: + // renamed_services: + // Publisher: TopicAdmin + RenamedServices map[string]string `protobuf:"bytes,2,rep,name=renamed_services,json=renamedServices,proto3" json:"renamed_services,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *GoSettings) Reset() { @@ -1025,6 +1044,13 @@ func (x *GoSettings) GetCommon() *CommonLanguageSettings { return nil } +func (x *GoSettings) GetRenamedServices() map[string]string { + if x != nil { + return x.RenamedServices + } + return nil +} + // Describes the generator configuration for a method. type MethodSettings struct { state protoimpl.MessageState @@ -1123,6 +1149,71 @@ func (x *MethodSettings) GetAutoPopulatedFields() []string { return nil } +// This message is used to configure the generation of a subset of the RPCs in +// a service for client libraries. 
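+// For example (editorial sketch, mirroring the go_settings publishing
+// example style above; the service and method names are hypothetical):
+//
+//	publishing:
+//
+//	  go_settings:
+//	    common:
+//	      selective_gapic_generation:
+//	        methods:
+//	          - google.example.v1.ExampleService.GetThing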
+type SelectiveGapicGeneration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An allowlist of the fully qualified names of RPCs that should be included + // on public client surfaces. + Methods []string `protobuf:"bytes,1,rep,name=methods,proto3" json:"methods,omitempty"` + // Setting this to true indicates to the client generators that methods + // that would be excluded from the generation should instead be generated + // in a way that indicates these methods should not be consumed by + // end users. How this is expressed is up to individual language + // implementations to decide. Some examples may be: added annotations, + // obfuscated identifiers, or other language idiomatic patterns. + GenerateOmittedAsInternal bool `protobuf:"varint,2,opt,name=generate_omitted_as_internal,json=generateOmittedAsInternal,proto3" json:"generate_omitted_as_internal,omitempty"` +} + +func (x *SelectiveGapicGeneration) Reset() { + *x = SelectiveGapicGeneration{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SelectiveGapicGeneration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SelectiveGapicGeneration) ProtoMessage() {} + +func (x *SelectiveGapicGeneration) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SelectiveGapicGeneration.ProtoReflect.Descriptor instead. +func (*SelectiveGapicGeneration) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{12} +} + +func (x *SelectiveGapicGeneration) GetMethods() []string { + if x != nil { + return x.Methods + } + return nil +} + +func (x *SelectiveGapicGeneration) GetGenerateOmittedAsInternal() bool { + if x != nil { + return x.GenerateOmittedAsInternal + } + return false +} + // Experimental features to be included during client library generation. // These fields will be deprecated once the feature graduates and is enabled // by default. @@ -1136,12 +1227,22 @@ type PythonSettings_ExperimentalFeatures struct { // This feature will be enabled by default 1 month after launching the // feature in preview packages. RestAsyncIoEnabled bool `protobuf:"varint,1,opt,name=rest_async_io_enabled,json=restAsyncIoEnabled,proto3" json:"rest_async_io_enabled,omitempty"` + // Enables generation of protobuf code using new types that are more + // Pythonic which are included in `protobuf>=5.29.x`. This feature will be + // enabled by default 1 month after launching the feature in preview + // packages. + ProtobufPythonicTypesEnabled bool `protobuf:"varint,2,opt,name=protobuf_pythonic_types_enabled,json=protobufPythonicTypesEnabled,proto3" json:"protobuf_pythonic_types_enabled,omitempty"` + // Disables generation of an unversioned Python package for this client + // library. This means that the module names will need to be versioned in + // import statements. For example `import google.cloud.library_v2` instead + // of `import google.cloud.library`. 
+ UnversionedPackageDisabled bool `protobuf:"varint,3,opt,name=unversioned_package_disabled,json=unversionedPackageDisabled,proto3" json:"unversioned_package_disabled,omitempty"` } func (x *PythonSettings_ExperimentalFeatures) Reset() { *x = PythonSettings_ExperimentalFeatures{} if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[13] + mi := &file_google_api_client_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1154,7 +1255,7 @@ func (x *PythonSettings_ExperimentalFeatures) String() string { func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {} func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[13] + mi := &file_google_api_client_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1177,6 +1278,20 @@ func (x *PythonSettings_ExperimentalFeatures) GetRestAsyncIoEnabled() bool { return false } +func (x *PythonSettings_ExperimentalFeatures) GetProtobufPythonicTypesEnabled() bool { + if x != nil { + return x.ProtobufPythonicTypesEnabled + } + return false +} + +func (x *PythonSettings_ExperimentalFeatures) GetUnversionedPackageDisabled() bool { + if x != nil { + return x.UnversionedPackageDisabled + } + return false +} + // Describes settings to use when generating API methods that use the // long-running operation pattern. // All default values below are from those used in the client library @@ -1205,7 +1320,7 @@ type MethodSettings_LongRunning struct { func (x *MethodSettings_LongRunning) Reset() { *x = MethodSettings_LongRunning{} if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[16] + mi := &file_google_api_client_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1218,7 +1333,7 @@ func (x *MethodSettings_LongRunning) String() string { func (*MethodSettings_LongRunning) ProtoMessage() {} func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[16] + mi := &file_google_api_client_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1406,7 +1521,7 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf8, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x30, 0x0a, 0x12, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, @@ -1415,251 +1530,283 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x52, - 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x93, 0x05, - 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, - 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, - 0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x65, 0x6e, - 0x75, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x73, 0x74, 0x4e, - 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6a, - 0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x15, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6a, 0x61, - 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x63, 0x70, - 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x70, - 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63, 0x70, 0x70, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68, 0x70, 0x5f, 0x73, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e, 0x6f, 0x64, 0x65, 0x5f, - 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4e, 0x6f, 0x64, 0x65, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, - 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, - 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x64, 0x6f, 0x74, - 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x72, - 0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1b, 0x20, 0x01, + 
0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x62, 0x0a, + 0x1a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x70, 0x69, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0x93, 0x05, 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, + 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, + 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, + 0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, + 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, + 0x63, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, + 0x65, 0x73, 0x74, 0x4e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12, + 0x3d, 0x0a, 0x0d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x52, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, + 0x0a, 0x0c, 0x63, 0x70, 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63, + 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68, + 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68, + 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, + 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, + 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74, + 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e, + 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x72, 0x75, - 0x62, 0x79, 0x53, 0x65, 
0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x67, 0x6f, - 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x22, 0xf4, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x69, - 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x69, - 0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12, 0x2b, 0x0a, 0x11, 0x64, - 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, - 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x69, 0x5f, - 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x68, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x4c, 0x61, 0x62, 0x65, - 0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x69, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x47, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64, 0x6f, 0x63, 0x5f, 0x74, - 0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x49, 0x0a, - 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x6b, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, - 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, - 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10, 0x6c, 0x69, 0x62, 0x72, - 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x6d, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f, - 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 
0x75, 0x72, 0x69, 0x18, 0x6e, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, - 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, - 0x69, 0x12, 0x47, 0x0a, 0x20, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, - 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x72, 0x65, 0x73, - 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x22, 0x9a, 0x02, 0x0a, 0x0c, 0x4a, - 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6c, - 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x50, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, - 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, - 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, - 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, - 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x0b, 0x43, 0x70, 0x70, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, - 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xfd, 0x01, - 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, - 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x0b, - 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, - 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x64, 0x0a, 0x15, - 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x14, 0x65, 0x78, - 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x1a, 0x49, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, - 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x15, 0x72, 0x65, - 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f, 0x5f, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x41, - 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a, - 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, - 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, - 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, - 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, - 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, - 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, - 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, - 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, - 
0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, - 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, - 0x12, 0x38, 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, - 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, - 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, - 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, + 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f, + 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f, + 0x74, 0x6e, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, + 0x0e, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, + 0x3d, 0x0a, 0x0d, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x52, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, + 0x0a, 0x0b, 0x67, 0x6f, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, 0xf4, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, + 0x69, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x53, 
0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, + 0x65, 0x77, 0x5f, 0x69, 0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12, + 0x2b, 0x0a, 0x11, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x75, 0x72, 0x69, 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75, + 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, + 0x61, 0x70, 0x69, 0x5f, 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x18, 0x68, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, + 0x65, 0x72, 0x5f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18, + 0x69, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, + 0x47, 0x69, 0x74, 0x68, 0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64, + 0x6f, 0x63, 0x5f, 0x74, 0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x12, 0x49, 0x0a, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x6b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, + 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, + 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10, + 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x6d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, + 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, + 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, + 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, + 0x6e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x47, 0x0a, 0x20, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6f, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x1d, 0x72, 0x65, 0x73, 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, + 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x22, 0x9a, + 0x02, 0x0a, 0x0c, 0x4a, 0x61, 0x76, 0x61, 0x53, 
0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, + 0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, + 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, + 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x0b, 0x43, + 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, + 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x22, 0x87, 0x03, 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, - 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 
0x67, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, - 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, - 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, - 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, - 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, - 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, - 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, - 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, - 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, - 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, - 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, + 0x12, 0x64, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, + 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, + 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65, + 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x52, 0x14, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0xd2, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72, + 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, + 0x31, 0x0a, 0x15, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f, + 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, + 0x72, 0x65, 0x73, 0x74, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x12, 0x45, 0x0a, 0x1f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x5f, 0x70, + 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x5f, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x54, 0x79, 0x70, + 0x65, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x1c, 0x75, 0x6e, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, + 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x1a, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x50, 0x61, 0x63, 0x6b, + 0x61, 0x67, 
0x65, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, + 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, + 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, + 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, + 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, + 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, + 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, + 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, + 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, + 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, + 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, + 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, + 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, + 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, + 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 
0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, + 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xe4, 0x01, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, + 0x56, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, + 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc2, 0x03, 0x0a, 0x0e, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, + 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, + 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, + 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, + 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x70, 0x6f, + 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, + 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, + 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, + 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 
0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, + 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, + 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, + 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, + 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, + 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, + 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, - 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, - 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, - 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, - 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, - 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, - 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, - 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, - 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, - 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, - 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, - 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, - 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, - 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 
0x41, 0x47, - 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, - 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, - 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, - 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, - 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, - 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x22, 0x75, 0x0a, 0x18, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x70, + 0x69, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x12, 0x3f, 0x0a, 0x1c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x65, 0x5f, 0x6f, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x73, 0x5f, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4f, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x41, 0x73, 0x49, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 
0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, + 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, + 0x03, 0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, + 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, + 0x57, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, + 0x05, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, + 0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, + 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, + 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, + 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, + 0x0a, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, + 0x41, 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, + 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, + 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, + 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, + 0xfa, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 
0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1675,7 +1822,7 @@ func file_google_api_client_proto_rawDescGZIP() []byte { } var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 19) var file_google_api_client_proto_goTypes = []interface{}{ (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination @@ -1691,55 +1838,59 @@ var file_google_api_client_proto_goTypes = []interface{}{ (*RubySettings)(nil), // 11: google.api.RubySettings (*GoSettings)(nil), // 12: google.api.GoSettings (*MethodSettings)(nil), // 13: google.api.MethodSettings - nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry - (*PythonSettings_ExperimentalFeatures)(nil), // 15: google.api.PythonSettings.ExperimentalFeatures - nil, // 16: google.api.DotnetSettings.RenamedServicesEntry - nil, // 17: google.api.DotnetSettings.RenamedResourcesEntry - (*MethodSettings_LongRunning)(nil), // 18: google.api.MethodSettings.LongRunning - (api.LaunchStage)(0), // 19: google.api.LaunchStage - (*durationpb.Duration)(nil), // 20: google.protobuf.Duration - (*descriptorpb.MethodOptions)(nil), // 21: google.protobuf.MethodOptions - (*descriptorpb.ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions + (*SelectiveGapicGeneration)(nil), // 14: google.api.SelectiveGapicGeneration + nil, // 15: google.api.JavaSettings.ServiceClassNamesEntry + (*PythonSettings_ExperimentalFeatures)(nil), // 16: google.api.PythonSettings.ExperimentalFeatures + nil, // 17: google.api.DotnetSettings.RenamedServicesEntry + nil, // 18: google.api.DotnetSettings.RenamedResourcesEntry + nil, // 19: google.api.GoSettings.RenamedServicesEntry + (*MethodSettings_LongRunning)(nil), // 20: google.api.MethodSettings.LongRunning + (api.LaunchStage)(0), // 21: google.api.LaunchStage + (*durationpb.Duration)(nil), // 22: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 23: google.protobuf.MethodOptions + (*descriptorpb.ServiceOptions)(nil), // 24: google.protobuf.ServiceOptions } var file_google_api_client_proto_depIdxs = []int32{ 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination - 19, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage - 5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings - 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings - 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings - 8, // 5: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings - 9, // 6: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings - 10, // 7: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings - 11, // 8: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings - 12, // 9: 
google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings - 13, // 10: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings - 0, // 11: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization - 3, // 12: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings - 14, // 13: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry - 2, // 14: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 16: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings - 15, // 18: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures - 2, // 19: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 20: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings - 16, // 21: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry - 17, // 22: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry - 2, // 23: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 24: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings - 18, // 25: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning - 20, // 26: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration - 20, // 27: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration - 20, // 28: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration - 21, // 29: google.api.method_signature:extendee -> google.protobuf.MethodOptions - 22, // 30: google.api.default_host:extendee -> google.protobuf.ServiceOptions - 22, // 31: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions - 22, // 32: google.api.api_version:extendee -> google.protobuf.ServiceOptions - 33, // [33:33] is the sub-list for method output_type - 33, // [33:33] is the sub-list for method input_type - 33, // [33:33] is the sub-list for extension type_name - 29, // [29:33] is the sub-list for extension extendee - 0, // [0:29] is the sub-list for field type_name + 14, // 1: google.api.CommonLanguageSettings.selective_gapic_generation:type_name -> google.api.SelectiveGapicGeneration + 21, // 2: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 5, // 3: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings + 6, // 4: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings + 7, // 5: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings + 8, // 6: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings + 9, // 7: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings + 10, // 8: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings + 11, // 9: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings + 12, // 10: google.api.ClientLibrarySettings.go_settings:type_name -> 
google.api.GoSettings + 13, // 11: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings + 0, // 12: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization + 3, // 13: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings + 15, // 14: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry + 2, // 15: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 16: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 17: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 18: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings + 16, // 19: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures + 2, // 20: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 21: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings + 17, // 22: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry + 18, // 23: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry + 2, // 24: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 25: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 19, // 26: google.api.GoSettings.renamed_services:type_name -> google.api.GoSettings.RenamedServicesEntry + 20, // 27: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 22, // 28: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 22, // 29: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 22, // 30: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration + 23, // 31: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 24, // 32: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 24, // 33: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 24, // 34: google.api.api_version:extendee -> google.protobuf.ServiceOptions + 35, // [35:35] is the sub-list for method output_type + 35, // [35:35] is the sub-list for method input_type + 35, // [35:35] is the sub-list for extension type_name + 31, // [31:35] is the sub-list for extension extendee + 0, // [0:31] is the sub-list for field type_name } func init() { file_google_api_client_proto_init() } @@ -1892,7 +2043,19 @@ func file_google_api_client_proto_init() { return nil } } - file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_api_client_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SelectiveGapicGeneration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PythonSettings_ExperimentalFeatures); i { case 0: return &v.state @@ -1904,7 +2067,7 @@ func file_google_api_client_proto_init() { return nil } } - file_google_api_client_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_google_api_client_proto_msgTypes[18].Exporter = func(v 
interface{}, i int) interface{} { switch v := v.(*MethodSettings_LongRunning); i { case 0: return &v.state @@ -1923,7 +2086,7 @@ func file_google_api_client_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_client_proto_rawDesc, NumEnums: 2, - NumMessages: 17, + NumMessages: 19, NumExtensions: 4, NumServices: 0, }, diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go index ffb5838cb18..c93b4f52487 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -663,14 +663,14 @@ var file_google_api_http_proto_rawDesc = []byte{ 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, 0x6a, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, + 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, 0x67, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x09, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, - 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, + 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go index b5db279aebf..a1c543a9487 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -556,15 +556,14 @@ var file_google_api_resource_proto_rawDesc = []byte{ 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9d, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x6e, 0x0a, 0x0e, 0x63, 0x6f, + 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x6b, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x73, - 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go index 1d8397b02b4..2b54db30456 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go @@ -69,7 +69,7 @@ const ( // The routing header consists of one or multiple key-value pairs. Every key // and value must be percent-encoded, and joined together in the format of // `key1=value1&key2=value2`. -// In the examples below I am skipping the percent-encoding for readablity. +// The examples below skip the percent-encoding for readability. // // # Example 1 // diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go index e7d3805e365..f388426b08f 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -159,14 +159,14 @@ var file_google_api_httpbody_proto_rawDesc = []byte{ 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, - 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x68, 0x0a, 0x0e, 0x63, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x65, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x48, 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, - 0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xf8, 0x01, 0x01, 0xa2, 0x02, - 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xa2, 0x02, 0x04, 0x47, 0x41, + 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go index 3e562182792..3cd9a5bb8e6 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -80,11 +80,12 @@ type ErrorInfo struct { Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` // Additional structured details about this error. // - // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in + // Keys must match a regular expression of `[a-z][a-zA-Z0-9-_]+` but should + // ideally be lowerCamelCase. Also, they must be limited to 64 characters in // length. 
When identifying the current value of an exceeded limit, the units
	// should be contained in the key, not the value. For example, rather than
-	// {"instanceLimit": "100/request"}, should be returned as,
-	// {"instanceLimitPerRequest": "100"}, if the client exceeds the number of
+	// `{"instanceLimit": "100/request"}`, it should be returned as
+	// `{"instanceLimitPerRequest": "100"}` if the client exceeds the number of
 	// instances that can be created in a single (batch) request.
 	Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
 }
@@ -870,6 +871,16 @@ type BadRequest_FieldViolation struct {
 	Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"`
 	// A description of why the request element is bad.
 	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	// The reason for the field-level error. This is a constant value that
+	// identifies the proximate cause of the field-level error. It should
+	// uniquely identify the type of the FieldViolation within the scope of the
+	// google.rpc.ErrorInfo.domain. This should be at most 63
+	// characters and match a regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`,
+	// which represents UPPER_SNAKE_CASE.
+	Reason string `protobuf:"bytes,3,opt,name=reason,proto3" json:"reason,omitempty"`
+	// Provides a localized error message for field-level errors that is safe to
+	// return to the API consumer.
+	LocalizedMessage *LocalizedMessage `protobuf:"bytes,4,opt,name=localized_message,json=localizedMessage,proto3" json:"localized_message,omitempty"`
 }
 
 func (x *BadRequest_FieldViolation) Reset() {
@@ -918,6 +929,20 @@ func (x *BadRequest_FieldViolation) GetDescription() string {
 	return ""
 }
 
+func (x *BadRequest_FieldViolation) GetReason() string {
+	if x != nil {
+		return x.Reason
+	}
+	return ""
+}
+
+func (x *BadRequest_FieldViolation) GetLocalizedMessage() *LocalizedMessage {
+	if x != nil {
+		return x.LocalizedMessage
+	}
+	return nil
+}
+
 // Describes a URL link.
type Help_Link struct { state protoimpl.MessageState @@ -1026,51 +1051,57 @@ var file_google_rpc_error_details_proto_rawDesc = []byte{ 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa8, 0x01, 0x0a, 0x0a, 0x42, 0x61, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x02, 0x0a, 0x0a, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x48, 0x0a, 0x0e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, - 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, - 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70, - 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c, - 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a, - 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, - 
0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63, - 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, - 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, - 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, - 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, - 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xab, 0x01, 0x0a, 0x0e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, + 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x49, 0x0a, + 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, + 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, + 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 
0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, + 0x48, 0x65, 0x6c, 0x70, 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, + 0x2e, 0x48, 0x65, 0x6c, 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, + 0x73, 0x1a, 0x3a, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, + 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, + 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x42, 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, + 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x3b, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, + 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1111,11 +1142,12 @@ var file_google_rpc_error_details_proto_depIdxs = []int32{ 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link - 6, // [6:6] is the sub-list for method output_type - 6, // [6:6] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name + 9, // 6: google.rpc.BadRequest.FieldViolation.localized_message:type_name -> google.rpc.LocalizedMessage + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name } func init() { file_google_rpc_error_details_proto_init() } diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 3a2092f1056..c9b343c7156 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -73,17 +73,6 @@ func unregisterForTesting(name string) { delete(m, name) } -// connectedAddress returns the 
connected address for a SubConnState. The -// address is only valid if the state is READY. -func connectedAddress(scs SubConnState) resolver.Address { - return scs.connectedAddress -} - -// setConnectedAddress sets the connected address for a SubConnState. -func setConnectedAddress(scs *SubConnState, addr resolver.Address) { - scs.connectedAddress = addr -} - func init() { internal.BalancerUnregister = unregisterForTesting internal.ConnectedAddress = connectedAddress @@ -106,57 +95,6 @@ func Get(name string) Builder { return nil } -// A SubConn represents a single connection to a gRPC backend service. -// -// Each SubConn contains a list of addresses. -// -// All SubConns start in IDLE, and will not try to connect. To trigger the -// connecting, Balancers must call Connect. If a connection re-enters IDLE, -// Balancers must call Connect again to trigger a new connection attempt. -// -// gRPC will try to connect to the addresses in sequence, and stop trying the -// remainder once the first connection is successful. If an attempt to connect -// to all addresses encounters an error, the SubConn will enter -// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE. -// -// Once established, if a connection is lost, the SubConn will transition -// directly to IDLE. -// -// This interface is to be implemented by gRPC. Users should not need their own -// implementation of this interface. For situations like testing, any -// implementations should embed this interface. This allows gRPC to add new -// methods to this interface. -type SubConn interface { - // UpdateAddresses updates the addresses used in this SubConn. - // gRPC checks if currently-connected address is still in the new list. - // If it's in the list, the connection will be kept. - // If it's not in the list, the connection will gracefully close, and - // a new connection will be created. - // - // This will trigger a state transition for the SubConn. - // - // Deprecated: this method will be removed. Create new SubConns for new - // addresses instead. - UpdateAddresses([]resolver.Address) - // Connect starts the connecting for this SubConn. - Connect() - // GetOrBuildProducer returns a reference to the existing Producer for this - // ProducerBuilder in this SubConn, or, if one does not currently exist, - // creates a new one and returns it. Returns a close function which may be - // called when the Producer is no longer needed. Otherwise the producer - // will automatically be closed upon connection loss or subchannel close. - // Should only be called on a SubConn in state Ready. Otherwise the - // producer will be unable to create streams. - GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) - // Shutdown shuts down the SubConn gracefully. Any started RPCs will be - // allowed to complete. No future calls should be made on the SubConn. - // One final state update will be delivered to the StateListener (or - // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to - // indicate the shutdown operation. This may be delivered before - // in-progress RPCs are complete and the actual connection is closed. - Shutdown() -} - // NewSubConnOptions contains options to create new SubConn. type NewSubConnOptions struct { // CredsBundle is the credentials bundle that will be used in the created @@ -191,6 +129,13 @@ type State struct { // brand new implementation of this interface. For the situations like // testing, the new implementation should embed this interface. 
This allows
 // gRPC to add new methods to this interface.
+//
+// NOTICE: This interface is intended to be implemented by gRPC, or intercepted
+// by custom load balancing policies. Users should not need their own complete
+// implementation of this interface -- they should always delegate to a
+// ClientConn passed to Builder.Build() by embedding it in their
+// implementations. An embedded ClientConn must never be nil, or runtime panics
+// will occur.
 type ClientConn interface {
 	// NewSubConn is called by balancer to create a new SubConn.
 	// It doesn't block and wait for the connections to be established.
@@ -229,6 +174,17 @@ type ClientConn interface {
 	//
 	// Deprecated: Use the Target field in the BuildOptions instead.
 	Target() string
+
+	// MetricsRecorder provides the metrics recorder that balancers can use to
+	// record metrics. Balancer implementations which do not register metrics on
+	// the metrics registry and record on them can ignore this method. The returned
+	// MetricsRecorder is guaranteed to never be nil.
+	MetricsRecorder() estats.MetricsRecorder
+
+	// EnforceClientConnEmbedding is included to force implementers to embed
+	// another implementation of this interface, allowing gRPC to add methods
+	// without breaking users.
+	internal.EnforceClientConnEmbedding
 }
 
 // BuildOptions contains additional information for Build.
@@ -260,10 +216,6 @@ type BuildOptions struct {
 	// same resolver.Target as passed to the resolver. See the documentation for
 	// the resolver.Target type for details about what it contains.
 	Target resolver.Target
-	// MetricsRecorder is the metrics recorder that balancers can use to record
-	// metrics. Balancer implementations which do not register metrics on
-	// metrics registry and record on them can ignore this field.
-	MetricsRecorder estats.MetricsRecorder
 }
 
 // Builder creates a balancer.
@@ -424,18 +376,6 @@ type ExitIdler interface {
 	ExitIdle()
 }
 
-// SubConnState describes the state of a SubConn.
-type SubConnState struct {
-	// ConnectivityState is the connectivity state of the SubConn.
-	ConnectivityState connectivity.State
-	// ConnectionError is set if the ConnectivityState is TransientFailure,
-	// describing the reason the SubConn failed. Otherwise, it is nil.
-	ConnectionError error
-	// connectedAddr contains the connected address when ConnectivityState is
-	// Ready. Otherwise, it is indeterminate.
-	connectedAddress resolver.Address
-}
-
 // ClientConnState describes the state of a ClientConn relevant to the
 // balancer.
 type ClientConnState struct {
@@ -448,22 +388,3 @@ type ClientConnState struct {
 // ErrBadResolverState may be returned by UpdateClientConnState to indicate a
 // problem with the provided name resolver data.
 var ErrBadResolverState = errors.New("bad resolver state")
-
-// A ProducerBuilder is a simple constructor for a Producer. It is used by the
-// SubConn to create producers when needed.
-type ProducerBuilder interface {
-	// Build creates a Producer. The first parameter is always a
-	// grpc.ClientConnInterface (a type to allow creating RPCs/streams on the
-	// associated SubConn), but is declared as `any` to avoid a dependency
-	// cycle. Build also returns a close function that will be called when all
-	// references to the Producer have been given up for a SubConn, or when a
-	// connectivity state change occurs on the SubConn. The close function
-	// should always block until all asynchronous cleanup work is completed.
- Build(grpcClientConnInterface any) (p Producer, close func()) -} - -// A Producer is a type shared among potentially many consumers. It is -// associated with a SubConn, and an implementation will typically contain -// other methods to provide additional functionality, e.g. configuration or -// subscription registration. -type Producer any diff --git a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go new file mode 100644 index 00000000000..421c4fecc99 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go @@ -0,0 +1,358 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package endpointsharding implements a load balancing policy that manages +// homogeneous child policies each owning a single endpoint. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. package endpointsharding + +import ( + "errors" + rand "math/rand/v2" + "sync" + "sync/atomic" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +// ChildState is the balancer state of a child along with the endpoint which +// identifies the child balancer. +type ChildState struct { + Endpoint resolver.Endpoint + State balancer.State + + // Balancer exposes only the ExitIdler interface of the child LB policy. + // Other methods of the child policy are called only by endpointsharding. + Balancer balancer.ExitIdler +} + +// Options are the options to configure the behaviour of the +// endpointsharding balancer. +type Options struct { + // DisableAutoReconnect allows the balancer to keep child balancers in the + // IDLE state until they are explicitly triggered to exit using the + // ChildState obtained from the endpointsharding picker. When set to false, + // the endpointsharding balancer will automatically call ExitIdle on child + // connections that report IDLE. + DisableAutoReconnect bool +} + +// ChildBuilderFunc creates a new balancer with the ClientConn. It has the same +// type as the balancer.Builder.Build method. +type ChildBuilderFunc func(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer + +// NewBalancer returns a load balancing policy that manages homogeneous child +// policies each owning a single endpoint. The endpointsharding balancer +// forwards the LoadBalancingConfig in ClientConn state updates to its children. +func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions, childBuilder ChildBuilderFunc, esOpts Options) balancer.Balancer { + es := &endpointSharding{ + cc: cc, + bOpts: opts, + esOpts: esOpts, + childBuilder: childBuilder, + } + es.children.Store(resolver.NewEndpointMap()) + return es +} + +// endpointSharding is a balancer that wraps child balancers.
It creates a child +// balancer with child config for every unique Endpoint received. It updates the +// child states on any update from parent or child. +type endpointSharding struct { + cc balancer.ClientConn + bOpts balancer.BuildOptions + esOpts Options + childBuilder ChildBuilderFunc + + // childMu synchronizes calls to any single child. It must be held for all + // calls into a child. To avoid deadlocks, do not acquire childMu while + // holding mu. + childMu sync.Mutex + children atomic.Pointer[resolver.EndpointMap] // endpoint -> *balancerWrapper + + // inhibitChildUpdates is set during UpdateClientConnState/ResolverError + // calls (calls to children will each produce an update; we only want one + // update). + inhibitChildUpdates atomic.Bool + + // mu synchronizes access to the state stored in balancerWrappers in the + // children field. mu must not be held during calls into a child since + // synchronous calls back from the child may require taking mu, causing a + // deadlock. To avoid deadlocks, do not acquire childMu while holding mu. + mu sync.Mutex +} + +// UpdateClientConnState creates a child for new endpoints and deletes children +// for endpoints that are no longer present. It also updates all the children, +// and sends a single synchronous update of the children's aggregated state at +// the end of the UpdateClientConnState operation. If any endpoint has no +// addresses, it will ignore that endpoint. Otherwise, it returns the first +// error found from a child, but fully processes the new update. +func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState) error { + es.childMu.Lock() + defer es.childMu.Unlock() + + es.inhibitChildUpdates.Store(true) + defer func() { + es.inhibitChildUpdates.Store(false) + es.updateState() + }() + var ret error + + children := es.children.Load() + newChildren := resolver.NewEndpointMap() + + // Update/Create new children. + for _, endpoint := range state.ResolverState.Endpoints { + if _, ok := newChildren.Get(endpoint); ok { + // Endpoint child was already created, continue to avoid duplicate + // update. + continue + } + var childBalancer *balancerWrapper + if val, ok := children.Get(endpoint); ok { + childBalancer = val.(*balancerWrapper) + // Endpoint attributes may have changed, update the stored endpoint. + es.mu.Lock() + childBalancer.childState.Endpoint = endpoint + es.mu.Unlock() + } else { + childBalancer = &balancerWrapper{ + childState: ChildState{Endpoint: endpoint}, + ClientConn: es.cc, + es: es, + } + childBalancer.childState.Balancer = childBalancer + childBalancer.child = es.childBuilder(childBalancer, es.bOpts) + } + newChildren.Set(endpoint, childBalancer) + if err := childBalancer.updateClientConnStateLocked(balancer.ClientConnState{ + BalancerConfig: state.BalancerConfig, + ResolverState: resolver.State{ + Endpoints: []resolver.Endpoint{endpoint}, + Attributes: state.ResolverState.Attributes, + }, + }); err != nil && ret == nil { + // Return the first error found, and always commit full processing + // of updating children. Callers that need more specific errors + // surfaced across all endpoints should perform those validations + // themselves; this is a current limitation kept for simplicity's + // sake. + ret = err + } + } + // Delete old children that are no longer present.
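// A hedged usage sketch, assuming the pickfirstleaf balancer is registered: a
// parent policy delegates per-endpoint child management to this package and,
// with DisableAutoReconnect set, reconnects IDLE children itself via the
// ChildState values exposed through the picker (see ChildStatesFromPicker
// below):
//
//	func buildSharded(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
//		childBuilder := balancer.Get(pickfirstleaf.Name).Build
//		return NewBalancer(cc, opts, childBuilder, Options{DisableAutoReconnect: true})
//	}
//
//	// Later, when the parent observes the aggregated picker:
//	for _, cs := range ChildStatesFromPicker(state.Picker) {
//		if cs.State.ConnectivityState == connectivity.Idle {
//			cs.Balancer.ExitIdle() // the only child method a parent may call
//		}
//	}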
+ for _, e := range children.Keys() { + child, _ := children.Get(e) + if _, ok := newChildren.Get(e); !ok { + child.(*balancerWrapper).closeLocked() + } + } + es.children.Store(newChildren) + if newChildren.Len() == 0 { + return balancer.ErrBadResolverState + } + return ret +} + +// ResolverError forwards the resolver error to all of the endpointSharding's +// children and sends a single synchronous update of the childStates at the end +// of the ResolverError operation. +func (es *endpointSharding) ResolverError(err error) { + es.childMu.Lock() + defer es.childMu.Unlock() + es.inhibitChildUpdates.Store(true) + defer func() { + es.inhibitChildUpdates.Store(false) + es.updateState() + }() + children := es.children.Load() + for _, child := range children.Values() { + child.(*balancerWrapper).resolverErrorLocked(err) + } +} + +func (es *endpointSharding) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) { + // UpdateSubConnState is deprecated. +} + +func (es *endpointSharding) Close() { + es.childMu.Lock() + defer es.childMu.Unlock() + children := es.children.Load() + for _, child := range children.Values() { + child.(*balancerWrapper).closeLocked() + } +} + +// updateState updates this component's state. It sends the aggregated state, +// and a picker with round robin behavior with all the child states present if +// needed. +func (es *endpointSharding) updateState() { + if es.inhibitChildUpdates.Load() { + return + } + var readyPickers, connectingPickers, idlePickers, transientFailurePickers []balancer.Picker + + es.mu.Lock() + defer es.mu.Unlock() + + children := es.children.Load() + childStates := make([]ChildState, 0, children.Len()) + + for _, child := range children.Values() { + bw := child.(*balancerWrapper) + childState := bw.childState + childStates = append(childStates, childState) + childPicker := childState.State.Picker + switch childState.State.ConnectivityState { + case connectivity.Ready: + readyPickers = append(readyPickers, childPicker) + case connectivity.Connecting: + connectingPickers = append(connectingPickers, childPicker) + case connectivity.Idle: + idlePickers = append(idlePickers, childPicker) + case connectivity.TransientFailure: + transientFailurePickers = append(transientFailurePickers, childPicker) + // connectivity.Shutdown shouldn't appear. + } + } + + // Construct the round robin picker based off the aggregated state. Whatever + // the aggregated state, use the pickers present that are currently in that + // state only. + var aggState connectivity.State + var pickers []balancer.Picker + if len(readyPickers) >= 1 { + aggState = connectivity.Ready + pickers = readyPickers + } else if len(connectingPickers) >= 1 { + aggState = connectivity.Connecting + pickers = connectingPickers + } else if len(idlePickers) >= 1 { + aggState = connectivity.Idle + pickers = idlePickers + } else if len(transientFailurePickers) >= 1 { + aggState = connectivity.TransientFailure + pickers = transientFailurePickers + } else { + aggState = connectivity.TransientFailure + pickers = []balancer.Picker{base.NewErrPicker(errors.New("no children to pick from"))} + } // No children (resolver error before valid update). + p := &pickerWithChildStates{ + pickers: pickers, + childStates: childStates, + next: uint32(rand.IntN(len(pickers))), + } + es.cc.UpdateState(balancer.State{ + ConnectivityState: aggState, + Picker: p, + }) +} + +// pickerWithChildStates delegates to the pickers it holds in a round robin +// fashion. 
It also contains the childStates of all the endpointSharding's +// children. +type pickerWithChildStates struct { + pickers []balancer.Picker + childStates []ChildState + next uint32 +} + +func (p *pickerWithChildStates) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + nextIndex := atomic.AddUint32(&p.next, 1) + picker := p.pickers[nextIndex%uint32(len(p.pickers))] + return picker.Pick(info) +} + +// ChildStatesFromPicker returns the state of all the children managed by the +// endpoint sharding balancer that created this picker. +func ChildStatesFromPicker(picker balancer.Picker) []ChildState { + p, ok := picker.(*pickerWithChildStates) + if !ok { + return nil + } + return p.childStates +} + +// balancerWrapper is a wrapper of a balancer. It identifies a child balancer by +// endpoint, and persists recent child balancer state. +type balancerWrapper struct { + // The following fields are initialized at build time and read-only after + // that and therefore do not need to be guarded by a mutex. + + // child contains the wrapped balancer. Access its methods only through + // methods on balancerWrapper to ensure proper synchronization. + child balancer.Balancer + balancer.ClientConn // embed to intercept UpdateState, doesn't deal with SubConns + + es *endpointSharding + + // Access to the following fields is guarded by es.mu. + + childState ChildState + isClosed bool +} + +func (bw *balancerWrapper) UpdateState(state balancer.State) { + bw.es.mu.Lock() + bw.childState.State = state + bw.es.mu.Unlock() + if state.ConnectivityState == connectivity.Idle && !bw.es.esOpts.DisableAutoReconnect { + bw.ExitIdle() + } + bw.es.updateState() +} + +// ExitIdle pings an IDLE child balancer to exit idle in a new goroutine to +// avoid deadlocks due to synchronous balancer state updates. +func (bw *balancerWrapper) ExitIdle() { + if ei, ok := bw.child.(balancer.ExitIdler); ok { + go func() { + bw.es.childMu.Lock() + if !bw.isClosed { + ei.ExitIdle() + } + bw.es.childMu.Unlock() + }() + } +} + +// updateClientConnStateLocked delivers the ClientConnState to the child +// balancer. Callers must hold the child mutex of the parent endpointsharding +// balancer. +func (bw *balancerWrapper) updateClientConnStateLocked(ccs balancer.ClientConnState) error { + return bw.child.UpdateClientConnState(ccs) +} + +// closeLocked closes the child balancer. Callers must hold the child mutex of +// the parent endpointsharding balancer. +func (bw *balancerWrapper) closeLocked() { + bw.child.Close() + bw.isClosed = true +} + +func (bw *balancerWrapper) resolverErrorLocked(err error) { + bw.child.ResolverError(err) +} diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go index c5197894584..7d66cb491c4 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go @@ -18,7 +18,18 @@ // Package internal contains code internal to the pickfirst package. package internal -import "math/rand" +import ( + rand "math/rand/v2" + "time" +) -// RandShuffle pseudo-randomizes the order of addresses. -var RandShuffle = rand.Shuffle +var ( + // RandShuffle pseudo-randomizes the order of addresses. + RandShuffle = rand.Shuffle + // TimeAfterFunc allows mocking the timer for testing connection delay + // related functionality.
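// For example (a hedged sketch), a test inside the grpc-go module could stub
// TimeAfterFunc to run the callback immediately and skip the real delay:
//
//	internal.TimeAfterFunc = func(_ time.Duration, f func()) func() {
//		f() // fire synchronously instead of after the delay
//		return func() {} // no-op cancel, since no timer is pending
//	}
//
// Production code keeps the default below, which wraps time.AfterFunc and
// returns a function that stops the timer.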
+ TimeAfterFunc = func(d time.Duration, f func()) func() { + timer := time.AfterFunc(d, f) + return func() { timer.Stop() } + } +) diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index e069346a756..ea8899818c2 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -23,7 +23,7 @@ import ( "encoding/json" "errors" "fmt" - "math/rand" + rand "math/rand/v2" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/pickfirst/internal" diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go index 985b6edc7f4..113181e6b35 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -29,11 +29,15 @@ import ( "encoding/json" "errors" "fmt" + "net" + "net/netip" "sync" + "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/pickfirst/internal" "google.golang.org/grpc/connectivity" + expstats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" @@ -50,26 +54,77 @@ func init() { balancer.Register(pickfirstBuilder{}) } +type ( + // enableHealthListenerKeyType is a unique key type used in resolver + // attributes to indicate whether the health listener usage is enabled. + enableHealthListenerKeyType struct{} + // managedByPickfirstKeyType is an attribute key type to inform Outlier + // Detection that the generic health listener is being used. + // TODO: https://github.com/grpc/grpc-go/issues/7915 - Remove this when + // implementing the dualstack design. This is a hack. Once Dualstack is + // completed, outlier detection will stop sending ejection updates through + // the connectivity listener. + managedByPickfirstKeyType struct{} +) + var ( logger = grpclog.Component("pick-first-leaf-lb") // Name is the name of the pick_first_leaf balancer. // It is changed to "pick_first" in init() if this balancer is to be // registered as the default pickfirst. - Name = "pick_first_leaf" + Name = "pick_first_leaf" + disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.disconnections", + Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.", + Unit: "disconnection", + Labels: []string{"grpc.target"}, + Default: false, + }) + connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.connection_attempts_succeeded", + Description: "EXPERIMENTAL. Number of successful connection attempts.", + Unit: "attempt", + Labels: []string{"grpc.target"}, + Default: false, + }) + connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.connection_attempts_failed", + Description: "EXPERIMENTAL. Number of failed connection attempts.", + Unit: "attempt", + Labels: []string{"grpc.target"}, + Default: false, + }) +) + +const ( + // TODO: change to pick-first when this becomes the default pick_first policy. 
+ logPrefix = "[pick-first-leaf-lb %p] " + // connectionDelayInterval is the time to wait during the happy eyeballs + // pass before starting the next connection attempt. + connectionDelayInterval = 250 * time.Millisecond ) -// TODO: change to pick-first when this becomes the default pick_first policy. -const logPrefix = "[pick-first-leaf-lb %p] " +type ipAddrFamily int + +const ( + // ipAddrFamilyUnknown represents strings that can't be parsed as an IP + // address. + ipAddrFamilyUnknown ipAddrFamily = iota + ipAddrFamilyV4 + ipAddrFamilyV6 +) type pickfirstBuilder struct{} -func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { +func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer { b := &pickfirstBalancer{ - cc: cc, - addressList: addressList{}, - subConns: resolver.NewAddressMap(), - state: connectivity.Connecting, - mu: sync.Mutex{}, + cc: cc, + target: bo.Target.String(), + metricsRecorder: cc.MetricsRecorder(), + + subConns: resolver.NewAddressMap(), + state: connectivity.Connecting, + cancelConnectionTimer: func() {}, } b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b @@ -87,6 +142,24 @@ func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalan return cfg, nil } +// EnableHealthListener updates the state to configure pickfirst for using a +// generic health listener. +func EnableHealthListener(state resolver.State) resolver.State { + state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true) + return state +} + +// IsManagedByPickfirst returns whether an address belongs to a SubConn +// managed by the pickfirst LB policy. +// TODO: https://github.com/grpc/grpc-go/issues/7915 - This is a hack to disable +// outlier_detection via the connectivity listener when using pick_first. +// Once Dualstack changes are complete, all SubConns will be created by +// pick_first and outlier detection will only use the health listener for +// ejection. This hack can then be removed. +func IsManagedByPickfirst(addr resolver.Address) bool { + return addr.BalancerAttributes.Value(managedByPickfirstKeyType{}) != nil +} + type pfConfig struct { serviceconfig.LoadBalancingConfig `json:"-"` @@ -104,14 +177,20 @@ type scData struct { subConn balancer.SubConn addr resolver.Address - state connectivity.State - lastErr error + rawConnectivityState connectivity.State + // The effective connectivity state based on raw connectivity, health state + // and after following sticky TransientFailure behaviour defined in A62. + effectiveState connectivity.State + lastErr error + connectionFailedInFirstPass bool } func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { + addr.BalancerAttributes = addr.BalancerAttributes.WithValue(managedByPickfirstKeyType{}, true) sd := &scData{ - state: connectivity.Idle, - addr: addr, + rawConnectivityState: connectivity.Idle, + effectiveState: connectivity.Idle, + addr: addr, } sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ StateListener: func(state balancer.SubConnState) { @@ -128,19 +207,25 @@ func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { type pickfirstBalancer struct { // The following fields are initialized at build time and read-only after // that and therefore do not need to be guarded by a mutex.
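// A hedged sketch of how such an experimental metric is defined and recorded;
// the metric name "grpc.lb.pick_first.example_events" is hypothetical, while
// the Record call mirrors the real call sites further below:
//
//	var exampleMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
//		Name:        "grpc.lb.pick_first.example_events",
//		Description: "EXPERIMENTAL. Number of example events.",
//		Unit:        "event",
//		Labels:      []string{"grpc.target"},
//		Default:     false,
//	})
//
//	// Inside the balancer, with one label value per declared label:
//	exampleMetric.Record(b.metricsRecorder, 1, b.target)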
- logger *internalgrpclog.PrefixLogger - cc balancer.ClientConn + logger *internalgrpclog.PrefixLogger + cc balancer.ClientConn + target string + metricsRecorder expstats.MetricsRecorder // guaranteed to be non-nil // The mutex is used to ensure synchronization of updates triggered // from the idle picker and the already serialized resolver and // SubConn state updates. - mu sync.Mutex + mu sync.Mutex + // State reported to the channel based on SubConn states and resolver + // updates. state connectivity.State // scData for active subconns mapped by address. - subConns *resolver.AddressMap - addressList addressList - firstPass bool - numTF int + subConns *resolver.AddressMap + addressList addressList + firstPass bool + numTF int + cancelConnectionTimer func() + healthCheckingEnabled bool } // ResolverError is called by the ClientConn when the name resolver produces @@ -166,7 +251,7 @@ func (b *pickfirstBalancer) resolverErrorLocked(err error) { return } - b.cc.UpdateState(balancer.State{ + b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, }) @@ -175,15 +260,16 @@ func (b *pickfirstBalancer) resolverErrorLocked(err error) { func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { b.mu.Lock() defer b.mu.Unlock() + b.cancelConnectionTimer() if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { // Cleanup state pertaining to the previous resolver state. // Treat an empty address list like an error by calling b.ResolverError. - b.state = connectivity.TransientFailure b.closeSubConnsLocked() b.addressList.updateAddrs(nil) b.resolverErrorLocked(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } + b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil cfg, ok := state.BalancerConfig.(pfConfig) if state.BalancerConfig != nil && !ok { return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState) @@ -206,9 +292,6 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // "Flatten the list by concatenating the ordered list of addresses for // each of the endpoints, in order." - A61 for _, endpoint := range endpoints { - // "In the flattened list, interleave addresses from the two address - // families, as per RFC-8305 section 4." - A61 - // TODO: support the above language. newAddrs = append(newAddrs, endpoint.Addresses...) } } else { @@ -231,16 +314,17 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // Not de-duplicating would result in attempting to connect to the same // SubConn multiple times in the same pass. We don't want this. newAddrs = deDupAddresses(newAddrs) + newAddrs = interleaveAddresses(newAddrs) - // Since we have a new set of addresses, we are again at first pass. - b.firstPass = true - - // If the previous ready SubConn exists in new address list, - // keep this connection and don't create new SubConns.
prevAddr := b.addressList.currentAddress() + prevSCData, found := b.subConns.Get(prevAddr) prevAddrsCount := b.addressList.size() + isPrevRawConnectivityStateReady := found && prevSCData.(*scData).rawConnectivityState == connectivity.Ready b.addressList.updateAddrs(newAddrs) - if b.state == connectivity.Ready && b.addressList.seekTo(prevAddr) { + + // If the previous ready SubConn exists in new address list, + // keep this connection and don't create new SubConns. + if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) { return nil } @@ -252,18 +336,17 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // we should still enter CONNECTING because the sticky TF behaviour // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported // due to connectivity failures. - if b.state == connectivity.Ready || b.state == connectivity.Connecting || prevAddrsCount == 0 { + if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 { // Start connection attempt at first address. - b.state = connectivity.Connecting - b.cc.UpdateState(balancer.State{ + b.forceUpdateConcludedStateLocked(balancer.State{ ConnectivityState: connectivity.Connecting, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) - b.requestConnectionLocked() + b.startFirstPassLocked() } else if b.state == connectivity.TransientFailure { // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until // we're READY. See A62. - b.requestConnectionLocked() + b.startFirstPassLocked() } return nil } @@ -278,6 +361,7 @@ func (b *pickfirstBalancer) Close() { b.mu.Lock() defer b.mu.Unlock() b.closeSubConnsLocked() + b.cancelConnectionTimer() b.state = connectivity.Shutdown } @@ -287,10 +371,19 @@ func (b *pickfirstBalancer) Close() { func (b *pickfirstBalancer) ExitIdle() { b.mu.Lock() defer b.mu.Unlock() - if b.state == connectivity.Idle && b.addressList.currentAddress() == b.addressList.first() { - b.firstPass = true - b.requestConnectionLocked() + if b.state == connectivity.Idle { + b.startFirstPassLocked() + } +} + +func (b *pickfirstBalancer) startFirstPassLocked() { + b.firstPass = true + b.numTF = 0 + // Reset the connection attempt record for existing SubConns. + for _, sd := range b.subConns.Values() { + sd.(*scData).connectionFailedInFirstPass = false } + b.requestConnectionLocked() } func (b *pickfirstBalancer) closeSubConnsLocked() { @@ -314,6 +407,70 @@ func deDupAddresses(addrs []resolver.Address) []resolver.Address { return retAddrs } +// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6) +// as per RFC-8305 section 4. +// Whichever address family is first in the list is followed by an address of +// the other address family; that is, if the first address in the list is IPv6, +// then the first IPv4 address should be moved up in the list to be second in +// the list. It doesn't support configuring "First Address Family Count", i.e. +// there will always be a single member of the first address family at the +// beginning of the interleaved list. +// Addresses that are neither IPv4 nor IPv6 are treated as part of a third +// "unknown" family for interleaving. 
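// As a worked example, given the flattened, de-duplicated list
//
//	[v6a, v6b, v4a, v4b, v6c]
//
// the first family seen is IPv6, so the interleaved result is
//
//	[v6a, v4a, v6b, v4b, v6c]
//
// one member of the leading family, then alternating families until each
// family's remaining members are exhausted.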
+// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6 +func interleaveAddresses(addrs []resolver.Address) []resolver.Address { + familyAddrsMap := map[ipAddrFamily][]resolver.Address{} + interleavingOrder := []ipAddrFamily{} + for _, addr := range addrs { + family := addressFamily(addr.Addr) + if _, found := familyAddrsMap[family]; !found { + interleavingOrder = append(interleavingOrder, family) + } + familyAddrsMap[family] = append(familyAddrsMap[family], addr) + } + + interleavedAddrs := make([]resolver.Address, 0, len(addrs)) + + for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) { + // Some IP types may have fewer addresses than others, so we look for + // the next type that has a remaining member to add to the interleaved + // list. + family := interleavingOrder[curFamilyIdx] + remainingMembers := familyAddrsMap[family] + if len(remainingMembers) > 0 { + interleavedAddrs = append(interleavedAddrs, remainingMembers[0]) + familyAddrsMap[family] = remainingMembers[1:] + } + } + + return interleavedAddrs +} + +// addressFamily returns the ipAddrFamily after parsing the address string. +// If the address isn't of the format "ip-address:port", it returns +// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when +// using a resolver like passthrough where the address may be a hostname in +// some format that the dialer can resolve. +func addressFamily(address string) ipAddrFamily { + // Parse the IP after removing the port. + host, _, err := net.SplitHostPort(address) + if err != nil { + return ipAddrFamilyUnknown + } + ip, err := netip.ParseAddr(host) + if err != nil { + return ipAddrFamilyUnknown + } + switch { + case ip.Is4() || ip.Is4In6(): + return ipAddrFamilyV4 + case ip.Is6(): + return ipAddrFamilyV6 + default: + return ipAddrFamilyUnknown + } +} + // reconcileSubConnsLocked updates the active subchannels based on a new address // list from the resolver. It does this by: // - closing subchannels: any existing subchannels associated with addresses @@ -342,6 +499,7 @@ func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) // shutdownRemainingLocked shuts down remaining subConns. Called when a subConn // becomes ready, which means that all other subConn must be shutdown. func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { + b.cancelConnectionTimer() for _, v := range b.subConns.Values() { sd := v.(*scData) if sd.subConn != selected.subConn { @@ -382,46 +540,89 @@ func (b *pickfirstBalancer) requestConnectionLocked() { } scd := sd.(*scData) - switch scd.state { + switch scd.rawConnectivityState { case connectivity.Idle: scd.subConn.Connect() + b.scheduleNextConnectionLocked() + return case connectivity.TransientFailure: - // Try the next address. + // The SubConn is being re-used and failed during a previous pass + // over the addressList. It has not completed backoff yet. + // Mark it as having failed and try the next address. + scd.connectionFailedInFirstPass = true lastErr = scd.lastErr continue - case connectivity.Ready: - // Should never happen. - b.logger.Errorf("Requesting a connection even though we have a READY SubConn") - case connectivity.Shutdown: - // Should never happen. - b.logger.Errorf("SubConn with state SHUTDOWN present in SubConns map") case connectivity.Connecting: - // Wait for the SubConn to report success or failure. 
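// Editorial worked example of the happy-eyeballs pacing implemented here,
// assuming the interleaved list [v6a, v4a, v6b]: Connect(v6a) starts at t=0
// and a 250ms timer is scheduled; if v6a is not READY by t=250ms, the timer
// fires, the list is incremented, and Connect(v4a) starts alongside v6a; the
// same happens for v6b at t=500ms. Whichever attempt reaches READY first wins,
// and the remaining SubConns are shut down.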
+ // Wait for the connection attempt to complete or the timer to fire + // before attempting the next address. + b.scheduleNextConnectionLocked() + return + default: + b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", scd.rawConnectivityState) + return + } - } - return } + + // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the - // first pass. - b.endFirstPassLocked(lastErr) + // first pass if possible. + b.endFirstPassIfPossibleLocked(lastErr) +} + +func (b *pickfirstBalancer) scheduleNextConnectionLocked() { + b.cancelConnectionTimer() + if !b.addressList.hasNext() { + return + } + curAddr := b.addressList.currentAddress() + cancelled := false // Access to this is protected by the balancer's mutex. + closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() { + b.mu.Lock() + defer b.mu.Unlock() + // If the scheduled task is cancelled while acquiring the mutex, return. + if cancelled { + return + } + if b.logger.V(2) { + b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr) + } + if b.addressList.increment() { + b.requestConnectionLocked() + } + }) + // Access to the cancellation callback held by the balancer is guarded by + // the balancer's mutex, so it's safe to set the boolean from the callback. + b.cancelConnectionTimer = sync.OnceFunc(func() { + cancelled = true + closeFn() + }) } func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) { b.mu.Lock() defer b.mu.Unlock() - oldState := sd.state - sd.state = newState.ConnectivityState + oldState := sd.rawConnectivityState + sd.rawConnectivityState = newState.ConnectivityState // Previously relevant SubConns can still callback with state updates. // To prevent pickers from returning these obsolete SubConns, this logic // is included to check if the current list of active SubConns includes this // SubConn. - if activeSD, found := b.subConns.Get(sd.addr); !found || activeSD != sd { + if !b.isActiveSCData(sd) { return } if newState.ConnectivityState == connectivity.Shutdown { + sd.effectiveState = connectivity.Shutdown return } + // Record a connection attempt when exiting CONNECTING. + if newState.ConnectivityState == connectivity.TransientFailure { + sd.connectionFailedInFirstPass = true + connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target) + } + if newState.ConnectivityState == connectivity.Ready { + connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) b.shutdownRemainingLocked(sd) if !b.addressList.seekTo(sd.addr) { // This should not fail as we should have only one SubConn after @@ -429,10 +630,30 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub b.logger.Errorf("Address %q not found in address list %v", sd.addr, b.addressList.addresses) return } - b.state = connectivity.Ready - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.Ready, - Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, + if !b.healthCheckingEnabled { + if b.logger.V(2) { + b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn) + } + + sd.effectiveState = connectivity.Ready + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, + }) + return + } + if b.logger.V(2) { + b.logger.Infof("SubConn %p reported connectivity state READY.
Registering health listener.", sd.subConn) + } + // Send a CONNECTING update to take the SubConn out of sticky-TF if + // required. + sd.effectiveState = connectivity.Connecting + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) { + b.updateSubConnHealthState(sd, scs) }) return } @@ -443,13 +664,24 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub // a transport is successfully created, but the connection fails // before the SubConn can send the notification for READY. We treat // this as a successful connection and transition to IDLE. - if (b.state == connectivity.Ready && newState.ConnectivityState != connectivity.Ready) || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { + // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second + // part of the if condition below once the issue is fixed. + if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { // Once a transport fails, the balancer enters IDLE and starts from // the first address when the picker is used. b.shutdownRemainingLocked(sd) - b.state = connectivity.Idle + sd.effectiveState = newState.ConnectivityState + // A READY SubConn may be interspersed between CONNECTING and IDLE; we + // need to account for that. + if oldState == connectivity.Connecting { + // A known issue (https://github.com/grpc/grpc-go/issues/7862) + // causes a race that prevents the READY state change notification. + // This works around it. + connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) + } + disconnectionsMetric.Record(b.metricsRecorder, 1, b.target) b.addressList.reset() - b.cc.UpdateState(balancer.State{ + b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.Idle, Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)}, }) @@ -459,32 +691,35 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub if b.firstPass { switch newState.ConnectivityState { case connectivity.Connecting: - // The balancer can be in either IDLE, CONNECTING or - // TRANSIENT_FAILURE. If it's in TRANSIENT_FAILURE, stay in + // The effective state can be in either IDLE, CONNECTING or + // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in // TRANSIENT_FAILURE until it's READY. See A62. - // If the balancer is already in CONNECTING, no update is needed. - if b.state == connectivity.Idle { - b.state = connectivity.Connecting - b.cc.UpdateState(balancer.State{ + if sd.effectiveState != connectivity.TransientFailure { + sd.effectiveState = connectivity.Connecting + b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.Connecting, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) } case connectivity.TransientFailure: sd.lastErr = newState.ConnectionError + sd.effectiveState = connectivity.TransientFailure // Since we're re-using common SubConns while handling resolver // updates, we could receive an out of turn TRANSIENT_FAILURE from - // a pass over the previous address list. We ignore such updates. - - if curAddr := b.addressList.currentAddress(); !equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { - return - } - if b.addressList.increment() { - b.requestConnectionLocked() - return + // a pass over the previous address list.
Happy Eyeballs will also + // cause out of order updates to arrive. + + if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { + b.cancelConnectionTimer() + if b.addressList.increment() { + b.requestConnectionLocked() + return + } } - // End of the first pass. - b.endFirstPassLocked(newState.ConnectionError) + + // End the first pass if we've seen a TRANSIENT_FAILURE from all + // SubConns once. + b.endFirstPassIfPossibleLocked(newState.ConnectionError) } return } @@ -495,7 +730,7 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub b.numTF = (b.numTF + 1) % b.subConns.Len() sd.lastErr = newState.ConnectionError if b.numTF%b.subConns.Len() == 0 { - b.cc.UpdateState(balancer.State{ + b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: newState.ConnectionError}, }) @@ -509,24 +744,95 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub } } -func (b *pickfirstBalancer) endFirstPassLocked(lastErr error) { +// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the +// addresses are tried and their SubConns have reported a failure. +func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { + // An optimization to avoid iterating over the entire SubConn map. + if b.addressList.isValid() { + return + } + // Connect() has been called on all the SubConns. The first pass can be + // ended if all the SubConns have reported a failure. + for _, v := range b.subConns.Values() { + sd := v.(*scData) + if !sd.connectionFailedInFirstPass { + return + } + } b.firstPass = false - b.numTF = 0 - b.state = connectivity.TransientFailure - - b.cc.UpdateState(balancer.State{ + b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: lastErr}, }) // Start re-connecting all the SubConns that are already in IDLE. for _, v := range b.subConns.Values() { sd := v.(*scData) - if sd.state == connectivity.Idle { + if sd.rawConnectivityState == connectivity.Idle { sd.subConn.Connect() } } } +func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool { + activeSD, found := b.subConns.Get(sd.addr) + return found && activeSD == sd +} + +func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + // Previously relevant SubConns can still callback with state updates. + // To prevent pickers from returning these obsolete SubConns, this logic + // is included to check if the current list of active SubConns includes + // this SubConn. 
+ if !b.isActiveSCData(sd) { + return + } + sd.effectiveState = state.ConnectivityState + switch state.ConnectivityState { + case connectivity.Ready: + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, + }) + case connectivity.TransientFailure: + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)}, + }) + case connectivity.Connecting: + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + default: + b.logger.Errorf("Got unexpected health update for SubConn %p: %v", sd.subConn, state) + } +} + +// updateBalancerState stores the state reported to the channel and calls +// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate +// updates to the channel. +func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) { + // In case of TransientFailure, allow the picker to be updated so the + // connectivity error is refreshed; in all other cases, don't send + // duplicate state updates. + if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure { + return + } + b.forceUpdateConcludedStateLocked(newState) +} + +// forceUpdateConcludedStateLocked stores the state reported to the channel and +// calls ClientConn.UpdateState(). +// A separate function is defined to force-update the ClientConn state since the +// channel doesn't correctly assume that LB policies start in CONNECTING and +// relies on the LB policy to send an initial CONNECTING update. +func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) { + b.state = newState.ConnectivityState + b.cc.UpdateState(newState) +} + type picker struct { result balancer.PickResult err error @@ -583,15 +889,6 @@ func (al *addressList) currentAddress() resolver.Address { return al.addresses[al.idx] } -// first returns the first address in the list. If the list is empty, it returns -// an empty address instead. -func (al *addressList) first() resolver.Address { - if len(al.addresses) == 0 { - return resolver.Address{} - } - return al.addresses[0] -} - func (al *addressList) reset() { al.idx = 0 } @@ -614,6 +911,16 @@ func (al *addressList) seekTo(needle resolver.Address) bool { return false } +// hasNext returns whether the addressList has another address to advance to, +// i.e. whether incrementing it would still leave it on a valid address. If the +// list has already moved past the end, it returns false. +func (al *addressList) hasNext() bool { + if !al.isValid() { + return false + } + return al.idx+1 < len(al.addresses) +} + // equalAddressIgnoringBalAttributes returns true if a and b are considered // equal. This is different from the Equal method on the resolver.Address type // which considers all fields to determine equality.
Here, we only consider diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 260255d31b6..35da5d1ec9d 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,12 +22,13 @@ package roundrobin import ( - "math/rand" - "sync/atomic" + "fmt" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/balancer/endpointsharding" + "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" ) // Name is the name of round_robin balancer. @@ -35,47 +36,44 @@ const Name = "round_robin" var logger = grpclog.Component("roundrobin") -// newBuilder creates a new roundrobin balancer builder. -func newBuilder() balancer.Builder { - return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) -} - func init() { - balancer.Register(newBuilder()) + balancer.Register(builder{}) } -type rrPickerBuilder struct{} +type builder struct{} -func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { - logger.Infof("roundrobinPicker: Build called with info: %v", info) - if len(info.ReadySCs) == 0 { - return base.NewErrPicker(balancer.ErrNoSubConnAvailable) - } - scs := make([]balancer.SubConn, 0, len(info.ReadySCs)) - for sc := range info.ReadySCs { - scs = append(scs, sc) - } - return &rrPicker{ - subConns: scs, - // Start at a random index, as the same RR balancer rebuilds a new - // picker when SubConn states change, and we don't want to apply excess - // load to the first server in the list. - next: uint32(rand.Intn(len(scs))), +func (bb builder) Name() string { + return Name +} + +func (bb builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + childBuilder := balancer.Get(pickfirstleaf.Name).Build + bal := &rrBalancer{ + cc: cc, + Balancer: endpointsharding.NewBalancer(cc, opts, childBuilder, endpointsharding.Options{}), } + bal.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[%p] ", bal)) + bal.logger.Infof("Created") + return bal } -type rrPicker struct { - // subConns is the snapshot of the roundrobin balancer when this picker was - // created. The slice is immutable. Each Get() will do a round robin - // selection from it and return the selected SubConn. - subConns []balancer.SubConn - next uint32 +type rrBalancer struct { + balancer.Balancer + cc balancer.ClientConn + logger *internalgrpclog.PrefixLogger } -func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - subConnsLen := uint32(len(p.subConns)) - nextIndex := atomic.AddUint32(&p.next, 1) +func (b *rrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + return b.Balancer.UpdateClientConnState(balancer.ClientConnState{ + // Enable the health listener in pickfirst children for client side health + // checks and outlier detection, if configured. + ResolverState: pickfirstleaf.EnableHealthListener(ccs.ResolverState), + }) +} - sc := p.subConns[nextIndex%subConnsLen] - return balancer.PickResult{SubConn: sc}, nil +func (b *rrBalancer) ExitIdle() { + // Should always be ok, as child is endpoint sharding. 
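// A hedged usage sketch (not from this patch): the rewritten round_robin is
// still selected the same way as before, e.g. through the service config; the
// target and insecure credentials are placeholders.
//
//	import (
//		"google.golang.org/grpc"
//		"google.golang.org/grpc/credentials/insecure"
//	)
//
//	conn, err := grpc.NewClient(
//		"dns:///example.com:443",
//		grpc.WithTransportCredentials(insecure.NewCredentials()),
//		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin": {}}]}`),
//	)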
+ if ei, ok := b.Balancer.(balancer.ExitIdler); ok { + ei.ExitIdle() + } } diff --git a/vendor/google.golang.org/grpc/balancer/subconn.go b/vendor/google.golang.org/grpc/balancer/subconn.go new file mode 100644 index 00000000000..9ee44d4af08 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/subconn.go @@ -0,0 +1,134 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package balancer + +import ( + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/resolver" +) + +// A SubConn represents a single connection to a gRPC backend service. +// +// All SubConns start in IDLE, and will not try to connect. To trigger a +// connection attempt, Balancers must call Connect. +// +// If the connection attempt fails, the SubConn will transition to +// TRANSIENT_FAILURE for a backoff period, and then return to IDLE. If the +// connection attempt succeeds, it will transition to READY. +// +// If a READY SubConn becomes disconnected, the SubConn will transition to IDLE. +// +// If a connection re-enters IDLE, Balancers must call Connect again to trigger +// a new connection attempt. +// +// Each SubConn contains a list of addresses. gRPC will try to connect to the +// addresses in sequence, and stop trying the remainder once the first +// connection is successful. However, this behavior is deprecated. SubConns +// should only use a single address. +// +// NOTICE: This interface is intended to be implemented by gRPC, or intercepted +// by custom load balancing policies. Users should not need their own complete +// implementation of this interface -- they should always delegate to a SubConn +// returned by ClientConn.NewSubConn() by embedding it in their implementations. +// An embedded SubConn must never be nil, or runtime panics will occur. +type SubConn interface { + // UpdateAddresses updates the addresses used in this SubConn. + // gRPC checks if currently-connected address is still in the new list. + // If it's in the list, the connection will be kept. + // If it's not in the list, the connection will gracefully close, and + // a new connection will be created. + // + // This will trigger a state transition for the SubConn. + // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. + UpdateAddresses([]resolver.Address) + // Connect starts the connecting for this SubConn. + Connect() + // GetOrBuildProducer returns a reference to the existing Producer for this + // ProducerBuilder in this SubConn, or, if one does not currently exist, + // creates a new one and returns it. Returns a close function which may be + // called when the Producer is no longer needed. Otherwise the producer + // will automatically be closed upon connection loss or subchannel close. + // Should only be called on a SubConn in state Ready. Otherwise the + // producer will be unable to create streams.
+ GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) + // Shutdown shuts down the SubConn gracefully. Any started RPCs will be + // allowed to complete. No future calls should be made on the SubConn. + // One final state update will be delivered to the StateListener (or + // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to + // indicate the shutdown operation. This may be delivered before + // in-progress RPCs are complete and the actual connection is closed. + Shutdown() + // RegisterHealthListener registers a health listener that receives health + // updates for a Ready SubConn. Only one health listener can be registered + // at a time. A health listener should be registered each time the SubConn's + // connectivity state changes to READY. Registering a health listener when + // the connectivity state is not READY may result in undefined behaviour. + // This method must not be called synchronously while handling an update + // from a previously registered health listener. + RegisterHealthListener(func(SubConnState)) + // EnforceSubConnEmbedding is included to force implementers to embed + // another implementation of this interface, allowing gRPC to add methods + // without breaking users. + internal.EnforceSubConnEmbedding +} + +// A ProducerBuilder is a simple constructor for a Producer. It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { + // Build creates a Producer. The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the + // associated SubConn), but is declared as `any` to avoid a dependency + // cycle. Build also returns a close function that will be called when all + // references to the Producer have been given up for a SubConn, or when a + // connectivity state change occurs on the SubConn. The close function + // should always block until all asynchronous cleanup work is completed. + Build(grpcClientConnInterface any) (p Producer, close func()) +} + +// SubConnState describes the state of a SubConn. +type SubConnState struct { + // ConnectivityState is the connectivity state of the SubConn. + ConnectivityState connectivity.State + // ConnectionError is set if the ConnectivityState is TransientFailure, + // describing the reason the SubConn failed. Otherwise, it is nil. + ConnectionError error + // connectedAddr contains the connected address when ConnectivityState is + // Ready. Otherwise, it is indeterminate. + connectedAddress resolver.Address +} + +// connectedAddress returns the connected address for a SubConnState. The +// address is only valid if the state is READY. +func connectedAddress(scs SubConnState) resolver.Address { + return scs.connectedAddress +} + +// setConnectedAddress sets the connected address for a SubConnState. +func setConnectedAddress(scs *SubConnState, addr resolver.Address) { + scs.connectedAddress = addr +} + +// A Producer is a type shared among potentially many consumers. It is +// associated with a SubConn, and an implementation will typically contain +// other methods to provide additional functionality, e.g. configuration or +// subscription registration. 
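// A minimal, hedged ProducerBuilder sketch; the names are hypothetical. Build
// receives the SubConn's grpc.ClientConnInterface (typed as any, as described
// above) and returns the producer plus its blocking close function:
//
//	type pingProducerBuilder struct{}
//
//	type pingProducer struct{ done chan struct{} }
//
//	func (pingProducerBuilder) Build(grpcClientConnInterface any) (balancer.Producer, func()) {
//		p := &pingProducer{done: make(chan struct{})}
//		// RPCs/streams could be started here using the passed connection.
//		return p, func() {
//			close(p.done) // per the contract, block until async cleanup is done
//		}
//	}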
+type Producer any diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index 2a4f2878aef..948a21ef683 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/channelz" @@ -34,7 +35,15 @@ import ( "google.golang.org/grpc/status" ) -var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) +var ( + setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) + // noOpRegisterHealthListenerFn is used when client side health checking is + // disabled. It sends a single READY update on the registered listener. + noOpRegisterHealthListenerFn = func(_ context.Context, listener func(balancer.SubConnState)) func() { + listener(balancer.SubConnState{ConnectivityState: connectivity.Ready}) + return func() {} + } +) // ccBalancerWrapper sits between the ClientConn and the Balancer. // @@ -51,6 +60,7 @@ var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnSt // It uses the gracefulswitch.Balancer internally to ensure that balancer // switches happen in a graceful manner. type ccBalancerWrapper struct { + internal.EnforceClientConnEmbedding // The following fields are initialized when the wrapper is created and are // read-only afterwards, and therefore can be accessed without a mutex. cc *ClientConn @@ -84,7 +94,6 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { CustomUserAgent: cc.dopts.copts.UserAgent, ChannelzParent: cc.channelz, Target: cc.parsedTarget, - MetricsRecorder: cc.metricsRecorderList, }, serializer: grpcsync.NewCallbackSerializer(ctx), serializerCancel: cancel, @@ -93,6 +102,10 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { return ccb } +func (ccb *ccBalancerWrapper) MetricsRecorder() stats.MetricsRecorder { + return ccb.cc.metricsRecorderList +} + // updateClientConnState is invoked by grpc to push a ClientConnState update to // the underlying balancer. This is always executed from the serializer, so // it is safe to call into the balancer here. @@ -189,6 +202,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer), stateListener: opts.StateListener, + healthData: newHealthData(connectivity.Idle), } ac.acbw = acbw return acbw, nil @@ -254,12 +268,39 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. type acBalancerWrapper struct { + internal.EnforceSubConnEmbedding ac *addrConn // read-only ccb *ccBalancerWrapper // read-only stateListener func(balancer.SubConnState) producersMu sync.Mutex producers map[balancer.ProducerBuilder]*refCountedProducer + + // Access to healthData is protected by healthMu. + healthMu sync.Mutex + // healthData is stored as a pointer to detect when the health listener is + // dropped or updated. This is required as closures can't be compared for + // equality. + healthData *healthData +} + +// healthData holds data related to health state reporting. 
+type healthData struct { + // connectivityState stores the most recent connectivity state delivered + // to the LB policy. This is stored to avoid sending updates when the + // SubConn has already exited connectivity state READY. + connectivityState connectivity.State + // closeHealthProducer stores a function to close the ref-counted health + // producer. The health producer is automatically closed when the SubConn + // state changes. + closeHealthProducer func() +} + +func newHealthData(s connectivity.State) *healthData { + return &healthData{ + connectivityState: s, + closeHealthProducer: func() {}, + } } // updateState is invoked by grpc to push a subConn state update to the @@ -279,6 +320,24 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolve if s == connectivity.Ready { setConnectedAddress(&scs, curAddr) } + // Invalidate the health listener by updating the healthData. + acbw.healthMu.Lock() + // A race may occur if a health listener is registered soon after the + // connectivity state is set but before the stateListener is called. + // Two cases may arise: + // 1. The new state is not READY: RegisterHealthListener has checks to + // ensure no updates are sent when the connectivity state is not + // READY. + // 2. The new state is READY: This means that the old state wasn't Ready. + // The RegisterHealthListener API mentions that a health listener + // must not be registered when a SubConn is not ready to avoid such + // races. When this happens, the LB policy would get health updates + // on the old listener. When the LB policy registers a new listener + // on receiving the connectivity update, the health updates will be + // sent to the new health listener. + acbw.healthData = newHealthData(scs.ConnectivityState) + acbw.healthMu.Unlock() + acbw.stateListener(scs) }) } @@ -361,7 +420,7 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) ( } acbw.producersMu.Unlock() } - return pData.producer, grpcsync.OnceFunc(unref) + return pData.producer, sync.OnceFunc(unref) } func (acbw *acBalancerWrapper) closeProducers() { @@ -373,3 +432,89 @@ func (acbw *acBalancerWrapper) closeProducers() { delete(acbw.producers, pb) } } + +// healthProducerRegisterFn is a type alias for the health producer's function +// for registering listeners. +type healthProducerRegisterFn = func(context.Context, balancer.SubConn, string, func(balancer.SubConnState)) func() + +// healthListenerRegFn returns a function to register a listener for health +// updates. If client side health checks are disabled, the registered listener +// will get a single READY (raw connectivity state) update. +// +// Client side health checking is enabled when all the following +// conditions are satisfied: +// 1. Health checking is not disabled using the dial option. +// 2. The health package is imported. +// 3. The health check config is present in the service config. +func (acbw *acBalancerWrapper) healthListenerRegFn() func(context.Context, func(balancer.SubConnState)) func() { + if acbw.ccb.cc.dopts.disableHealthCheck { + return noOpRegisterHealthListenerFn + } + regHealthLisFn := internal.RegisterClientHealthCheckListener + if regHealthLisFn == nil { + // The health package is not imported.
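// A hedged client-side sketch satisfying the three conditions above;
// "example.Service" is a placeholder service name. Condition 2 is met by a
// blank import of the health package, condition 1 by not passing
// grpc.WithDisableHealthCheck(), and condition 3 by the service config below:
//
//	import _ "google.golang.org/grpc/health"
//
//	conn, err := grpc.NewClient(
//		target,
//		grpc.WithTransportCredentials(insecure.NewCredentials()),
//		grpc.WithDefaultServiceConfig(`{"healthCheckConfig": {"serviceName": "example.Service"}}`),
//	)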
+ return noOpRegisterHealthListenerFn + } + cfg := acbw.ac.cc.healthCheckConfig() + if cfg == nil { + return noOpRegisterHealthListenerFn + } + return func(ctx context.Context, listener func(balancer.SubConnState)) func() { + return regHealthLisFn.(healthProducerRegisterFn)(ctx, acbw, cfg.ServiceName, listener) + } +} + +// RegisterHealthListener accepts a health listener from the LB policy. It sends +// updates to the health listener as long as the SubConn's connectivity state +// doesn't change and a new health listener is not registered. To invalidate +// the currently registered health listener, acbw updates the healthData. If a +// nil listener is registered, the active health listener is dropped. +func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.SubConnState)) { + acbw.healthMu.Lock() + defer acbw.healthMu.Unlock() + acbw.healthData.closeHealthProducer() + // listeners should not be registered when the connectivity state + // isn't Ready. This may happen when the balancer registers a listener + // after the connectivityState is updated, but before it is notified + // of the update. + if acbw.healthData.connectivityState != connectivity.Ready { + return + } + // Replace the health data to stop sending updates to any previously + // registered health listeners. + hd := newHealthData(connectivity.Ready) + acbw.healthData = hd + if listener == nil { + return + } + + registerFn := acbw.healthListenerRegFn() + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + // Don't send updates if a new listener is registered. + acbw.healthMu.Lock() + defer acbw.healthMu.Unlock() + if acbw.healthData != hd { + return + } + // Serialize the health updates from the health producer with + // other calls into the LB policy. + listenerWrapper := func(scs balancer.SubConnState) { + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + acbw.healthMu.Lock() + defer acbw.healthMu.Unlock() + if acbw.healthData != hd { + return + } + listener(scs) + }) + } + + hd.closeHealthProducer = registerFn(ctx, listenerWrapper) + }) +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 55bffaa77ef..b2f8fc7f436 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.36.4 // protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto @@ -31,6 +31,7 @@ import ( timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -233,10 +234,7 @@ func (Address_Type) EnumDescriptor() ([]byte, []int) { // Log entry we store in binary logs type GrpcLogEntry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The timestamp of the binary log message Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // Uniquely identifies a call. 
The value must not be 0 in order to disambiguate @@ -255,7 +253,7 @@ type GrpcLogEntry struct { // The logger uses one of the following fields to record the payload, // according to the type of the log entry. // - // Types that are assignable to Payload: + // Types that are valid to be assigned to Payload: // // *GrpcLogEntry_ClientHeader // *GrpcLogEntry_ServerHeader @@ -269,16 +267,16 @@ type GrpcLogEntry struct { // EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in // the case of trailers-only. On server side, peer is always // logged on EVENT_TYPE_CLIENT_HEADER. - Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` + Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GrpcLogEntry) Reset() { *x = GrpcLogEntry{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GrpcLogEntry) String() string { @@ -289,7 +287,7 @@ func (*GrpcLogEntry) ProtoMessage() {} func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -339,37 +337,45 @@ func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger { return GrpcLogEntry_LOGGER_UNKNOWN } -func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload { - if m != nil { - return m.Payload +func (x *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload { + if x != nil { + return x.Payload } return nil } func (x *GrpcLogEntry) GetClientHeader() *ClientHeader { - if x, ok := x.GetPayload().(*GrpcLogEntry_ClientHeader); ok { - return x.ClientHeader + if x != nil { + if x, ok := x.Payload.(*GrpcLogEntry_ClientHeader); ok { + return x.ClientHeader + } } return nil } func (x *GrpcLogEntry) GetServerHeader() *ServerHeader { - if x, ok := x.GetPayload().(*GrpcLogEntry_ServerHeader); ok { - return x.ServerHeader + if x != nil { + if x, ok := x.Payload.(*GrpcLogEntry_ServerHeader); ok { + return x.ServerHeader + } } return nil } func (x *GrpcLogEntry) GetMessage() *Message { - if x, ok := x.GetPayload().(*GrpcLogEntry_Message); ok { - return x.Message + if x != nil { + if x, ok := x.Payload.(*GrpcLogEntry_Message); ok { + return x.Message + } } return nil } func (x *GrpcLogEntry) GetTrailer() *Trailer { - if x, ok := x.GetPayload().(*GrpcLogEntry_Trailer); ok { - return x.Trailer + if x != nil { + if x, ok := x.Payload.(*GrpcLogEntry_Trailer); ok { + return x.Trailer + } } return nil } @@ -418,10 +424,7 @@ func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {} func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {} type ClientHeader struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // This contains only the metadata from the application. Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` // The name of the RPC method, which looks something like: @@ -435,16 +438,16 @@ type ClientHeader struct { // or : . 
Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` // the RPC timeout - Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` + Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ClientHeader) Reset() { *x = ClientHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ClientHeader) String() string { @@ -455,7 +458,7 @@ func (*ClientHeader) ProtoMessage() {} func (x *ClientHeader) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -499,21 +502,18 @@ func (x *ClientHeader) GetTimeout() *durationpb.Duration { } type ServerHeader struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // This contains only the metadata from the application. - Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ServerHeader) Reset() { *x = ServerHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerHeader) String() string { @@ -524,7 +524,7 @@ func (*ServerHeader) ProtoMessage() {} func (x *ServerHeader) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -547,10 +547,7 @@ func (x *ServerHeader) GetMetadata() *Metadata { } type Trailer struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // This contains only the metadata from the application. Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` // The gRPC status code. @@ -561,15 +558,15 @@ type Trailer struct { // The value of the 'grpc-status-details-bin' metadata key. If // present, this is always an encoded 'google.rpc.Status' message. 
StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Trailer) Reset() { *x = Trailer{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Trailer) String() string { @@ -580,7 +577,7 @@ func (*Trailer) ProtoMessage() {} func (x *Trailer) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -625,24 +622,21 @@ func (x *Trailer) GetStatusDetails() []byte { // Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE type Message struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Length of the message. It may not be the same as the length of the // data field, as the logging payload can be truncated or omitted. Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` // May be truncated or omitted. - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Message) Reset() { *x = Message{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Message) String() string { @@ -653,7 +647,7 @@ func (*Message) ProtoMessage() {} func (x *Message) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -704,20 +698,17 @@ func (x *Message) GetData() []byte { // header is just a normal metadata key. // The pair will not count towards the size limit. 
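A note on the regenerated messages in these hunks: the protoc-gen-go v1.36 output drops the protoimpl.UnsafeEnabled branches, tags the state field with `protogen:"open.v1"`, and moves the internal unknownFields/sizeCache bookkeeping below the exported fields. Getters remain nil-receiver safe, which the following standalone illustration (not part of the patch) relies on:

package main

import (
	"fmt"

	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
)

func main() {
	var e *binlogpb.GrpcLogEntry // nil message
	// Generated getters check the receiver, so chaining never panics.
	fmt.Printf("%q\n", e.GetClientHeader().GetMethodName()) // ""
}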
type Metadata struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` unknownFields protoimpl.UnknownFields - - Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Metadata) Reset() { *x = Metadata{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Metadata) String() string { @@ -728,7 +719,7 @@ func (*Metadata) ProtoMessage() {} func (x *Metadata) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -752,21 +743,18 @@ func (x *Metadata) GetEntry() []*MetadataEntry { // A metadata key value pair type MetadataEntry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` unknownFields protoimpl.UnknownFields - - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + sizeCache protoimpl.SizeCache } func (x *MetadataEntry) Reset() { *x = MetadataEntry{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MetadataEntry) String() string { @@ -777,7 +765,7 @@ func (*MetadataEntry) ProtoMessage() {} func (x *MetadataEntry) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -808,23 +796,20 @@ func (x *MetadataEntry) GetValue() []byte { // Address information type Address struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` - Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` // only for TYPE_IPV4 and TYPE_IPV6 - IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` + IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Address) Reset() { *x = 
Address{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Address) String() string { @@ -835,7 +820,7 @@ func (*Address) ProtoMessage() {} func (x *Address) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -873,7 +858,7 @@ func (x *Address) GetIpPort() uint32 { var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor -var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{ +var file_grpc_binlog_v1_binarylog_proto_rawDesc = string([]byte{ 0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, @@ -999,16 +984,16 @@ var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{ 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once - file_grpc_binlog_v1_binarylog_proto_rawDescData = file_grpc_binlog_v1_binarylog_proto_rawDesc + file_grpc_binlog_v1_binarylog_proto_rawDescData []byte ) func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() { - file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_binlog_v1_binarylog_proto_rawDescData) + file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_binlog_v1_binarylog_proto_rawDesc), len(file_grpc_binlog_v1_binarylog_proto_rawDesc))) }) return file_grpc_binlog_v1_binarylog_proto_rawDescData } @@ -1057,104 +1042,6 @@ func file_grpc_binlog_v1_binarylog_proto_init() { if File_grpc_binlog_v1_binarylog_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*GrpcLogEntry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*ClientHeader); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ServerHeader); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*Trailer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*Message); i { - case 0: 
- return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*Metadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*MetadataEntry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*Address); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{ (*GrpcLogEntry_ClientHeader)(nil), (*GrpcLogEntry_ServerHeader)(nil), @@ -1165,7 +1052,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_binlog_v1_binarylog_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_binlog_v1_binarylog_proto_rawDesc), len(file_grpc_binlog_v1_binarylog_proto_rawDesc)), NumEnums: 3, NumMessages: 8, NumExtensions: 0, @@ -1177,7 +1064,6 @@ func file_grpc_binlog_v1_binarylog_proto_init() { MessageInfos: file_grpc_binlog_v1_binarylog_proto_msgTypes, }.Build() File_grpc_binlog_v1_binarylog_proto = out.File - file_grpc_binlog_v1_binarylog_proto_rawDesc = nil file_grpc_binlog_v1_binarylog_proto_goTypes = nil file_grpc_binlog_v1_binarylog_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 19763f8eddf..a319ef97944 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -118,12 +118,26 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires // NewClient creates a new gRPC "channel" for the target URI provided. No I/O // is performed. Use of the ClientConn for RPCs will automatically cause it to -// connect. Connect may be used to manually create a connection, but for most -// users this is unnecessary. +// connect. The Connect method may be called to manually create a connection, +// but for most users this should be unnecessary. // // The target name syntax is defined in -// https://github.com/grpc/grpc/blob/master/doc/naming.md. e.g. to use dns -// resolver, a "dns:///" prefix should be applied to the target. +// https://github.com/grpc/grpc/blob/master/doc/naming.md. E.g. to use the dns +// name resolver, a "dns:///" prefix may be applied to the target. The default +// name resolver will be used if no scheme is detected, or if the parsed scheme +// is not a registered name resolver. The default resolver is "dns" but can be +// overridden using the resolver package's SetDefaultScheme. 
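To complement the target-string examples listed below, a hedged sketch of the scheme fallback just described, using a hypothetical host:

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	creds := grpc.WithTransportCredentials(insecure.NewCredentials())
	// Explicit scheme.
	a, err := grpc.NewClient("dns:///foo.example.com:8080", creds)
	if err != nil {
		panic(err)
	}
	defer a.Close()
	// No scheme: gRPC falls back to the default, "dns" unless overridden via
	// resolver.SetDefaultScheme.
	b, err := grpc.NewClient("foo.example.com:8080", creds)
	if err != nil {
		panic(err)
	}
	defer b.Close()
}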
+// +// Examples: +// +// - "foo.googleapis.com:8080" +// - "dns:///foo.googleapis.com:8080" +// - "dns:///foo.googleapis.com" +// - "dns:///10.0.0.213:8080" +// - "dns:///%5B2001:db8:85a3:8d3:1319:8a2e:370:7348%5D:443" +// - "dns://8.8.8.8/foo.googleapis.com:8080" +// - "dns://8.8.8.8/foo.googleapis.com" +// - "zookeeper://zk.example.com:9900/example_service" // // The DialOptions returned by WithBlock, WithTimeout, // WithReturnConnectionError, and FailOnNonTempDialError are ignored by this @@ -181,7 +195,7 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) } cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig) } - cc.mkp = cc.dopts.copts.KeepaliveParams + cc.keepaliveParams = cc.dopts.copts.KeepaliveParams if err = cc.initAuthority(); err != nil { return nil, err @@ -225,7 +239,12 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) { func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { // At the end of this method, we kick the channel out of idle, rather than // waiting for the first rpc. - opts = append([]DialOption{withDefaultScheme("passthrough")}, opts...) + // + // WithLocalDNSResolution dial option in `grpc.Dial` ensures that it + // preserves behavior: when default scheme passthrough is used, skip + // hostname resolution, when "dns" is used for resolution, perform + // resolution on the client. + opts = append([]DialOption{withDefaultScheme("passthrough"), WithLocalDNSResolution()}, opts...) cc, err := NewClient(target, opts...) if err != nil { return nil, err @@ -618,7 +637,7 @@ type ClientConn struct { balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close. sc *ServiceConfig // Latest service config received from the resolver. conns map[*addrConn]struct{} // Set to nil on close. - mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. + keepaliveParams keepalive.ClientParameters // May be updated upon receipt of a GoAway. // firstResolveEvent is used to track whether the name resolver sent us at // least one update. RPCs block on this event. May be accessed without mu // if we know we cannot be asked to enter idle mode while accessing it (e.g. @@ -775,10 +794,7 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) } } - var balCfg serviceconfig.LoadBalancingConfig - if cc.sc != nil && cc.sc.lbConfig != nil { - balCfg = cc.sc.lbConfig - } + balCfg := cc.sc.lbConfig bw := cc.balancerWrapper cc.mu.Unlock() @@ -870,7 +886,13 @@ func (cc *ClientConn) Target() string { return cc.target } -// CanonicalTarget returns the canonical target string of the ClientConn. +// CanonicalTarget returns the canonical target string used when creating cc. +// +// This always has the form "://[authority]/". 
For example: +// +// - "dns:///example.com:42" +// - "dns://8.8.8.8/example.com:42" +// - "unix:///path/to/socket" func (cc *ClientConn) CanonicalTarget() string { return cc.parsedTarget.String() } @@ -1213,8 +1235,8 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { case transport.GoAwayTooManyPings: v := 2 * ac.dopts.copts.KeepaliveParams.Time ac.cc.mu.Lock() - if v > ac.cc.mkp.Time { - ac.cc.mkp.Time = v + if v > ac.cc.keepaliveParams.Time { + ac.cc.keepaliveParams.Time = v } ac.cc.mu.Unlock() } @@ -1310,7 +1332,7 @@ func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, c ac.mu.Lock() ac.cc.mu.RLock() - ac.dopts.copts.KeepaliveParams = ac.cc.mkp + ac.dopts.copts.KeepaliveParams = ac.cc.keepaliveParams ac.cc.mu.RUnlock() copts := ac.dopts.copts @@ -1374,7 +1396,7 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, defer cancel() copts.ChannelzParent = ac.channelz - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) + newTr, err := transport.NewHTTP2Client(connectCtx, ac.cc.ctx, addr, copts, onClose) if err != nil { if logger.V(2) { logger.Infof("Creating new client transport to %q: %v", addr, err) @@ -1448,7 +1470,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { if !ac.scopts.HealthCheckEnabled { return } - healthCheckFunc := ac.cc.dopts.healthCheckFunc + healthCheckFunc := internal.HealthCheckFunc if healthCheckFunc == nil { // The health package is not imported to set health check function. // @@ -1480,7 +1502,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { } // Start the health checking stream. go func() { - err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) + err := healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) if err != nil { if status.Code(err) == codes.Unimplemented { channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled") diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index e840858b77b..959c2f99d4a 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -71,7 +71,7 @@ func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) { if err != nil { return nil, err } - return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil + return mem.BufferSlice{mem.SliceBuffer(data)}, nil } func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) { diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index e163a473df9..bd5fe22b6af 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -32,6 +32,8 @@ import ( "google.golang.org/grpc/internal/envconfig" ) +const alpnFailureHelpMessage = "If you upgraded from a grpc-go version earlier than 1.67, your TLS connections may have stopped working due to ALPN enforcement. For more details, see: https://github.com/grpc/grpc-go/issues/434" + var logger = grpclog.Component("credentials") // TLSInfo contains the auth information for a TLS authenticated connection. 
@@ -128,7 +130,7 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon if np == "" { if envconfig.EnforceALPNEnabled { conn.Close() - return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage) } logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName) } @@ -158,7 +160,7 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) if cs.NegotiatedProtocol == "" { if envconfig.EnforceALPNEnabled { conn.Close() - return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage) } else if logger.V(2) { logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases") } diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 518692c3afb..405a2ffeb39 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -73,7 +73,7 @@ type dialOptions struct { chainUnaryInts []UnaryClientInterceptor chainStreamInts []StreamClientInterceptor - cp Compressor + compressorV0 Compressor dc Decompressor bs internalbackoff.Strategy block bool @@ -87,7 +87,6 @@ type dialOptions struct { disableServiceConfig bool disableRetry bool disableHealthCheck bool - healthCheckFunc internal.HealthChecker minConnectTimeout func() time.Duration defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string @@ -95,6 +94,8 @@ type dialOptions struct { idleTimeout time.Duration defaultScheme string maxCallAttempts int + enableLocalDNSResolution bool // Specifies if target hostnames should be resolved when proxying is enabled. + useProxy bool // Specifies if a server should be connected via proxy. } // DialOption configures how we set up the connection. @@ -257,7 +258,7 @@ func WithCodec(c Codec) DialOption { // Deprecated: use UseCompressor instead. Will be supported throughout 1.x. func WithCompressor(cp Compressor) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.cp = cp + o.compressorV0 = cp }) } @@ -378,7 +379,22 @@ func WithInsecure() DialOption { // later release. func WithNoProxy() DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.UseProxy = false + o.useProxy = false + }) +} + +// WithLocalDNSResolution forces local DNS name resolution even when a proxy is +// specified in the environment. By default, the server name is provided +// directly to the proxy as part of the CONNECT handshake. This is ignored if +// WithNoProxy is used. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithLocalDNSResolution() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.enableLocalDNSResolution = true }) } @@ -429,6 +445,11 @@ func WithTimeout(d time.Duration) DialOption { // returned by f, gRPC checks the error's Temporary() method to decide if it // should try to reconnect to the network address. 
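Stepping back from this hunk: among the dial-option changes, WithLocalDNSResolution is the user-visible addition. By default a proxied client hands the unresolved server name to the proxy in the CONNECT request; the new option forces resolution on the client instead. A hedged usage sketch with a hypothetical target:

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// With HTTPS_PROXY set in the environment, resolve the target locally
	// rather than letting the proxy do it.
	cc, err := grpc.NewClient("dns:///backend.internal:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithLocalDNSResolution())
	if err != nil {
		panic(err)
	}
	defer cc.Close()
}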
// +// Note that gRPC by default performs name resolution on the target passed to +// NewClient. To bypass name resolution and cause the target string to be +// passed directly to the dialer here instead, use the "passthrough" resolver +// by specifying it in the target string, e.g. "passthrough:target". +// // Note: All supported releases of Go (as of December 2023) override the OS // defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive // with OS defaults for keepalive time and interval, use a net.Dialer that sets @@ -445,10 +466,6 @@ func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOp }) } -func init() { - internal.WithHealthCheckFunc = withHealthCheckFunc -} - // WithDialer returns a DialOption that specifies a function to use for dialing // network addresses. If FailOnNonTempDialError() is set to true, and an error // is returned by f, gRPC checks the error's Temporary() method to decide if it @@ -662,30 +679,20 @@ func WithDisableHealthCheck() DialOption { }) } -// withHealthCheckFunc replaces the default health check function with the -// provided one. It makes tests easier to change the health check function. -// -// For testing purpose only. -func withHealthCheckFunc(f internal.HealthChecker) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.healthCheckFunc = f - }) -} - func defaultDialOptions() dialOptions { return dialOptions{ copts: transport.ConnectOptions{ ReadBufferSize: defaultReadBufSize, WriteBufferSize: defaultWriteBufSize, - UseProxy: true, UserAgent: grpcUA, BufferPool: mem.DefaultBufferPool(), }, - bs: internalbackoff.DefaultExponential, - healthCheckFunc: internal.HealthCheckFunc, - idleTimeout: 30 * time.Minute, - defaultScheme: "dns", - maxCallAttempts: defaultMaxCallAttempts, + bs: internalbackoff.DefaultExponential, + idleTimeout: 30 * time.Minute, + defaultScheme: "dns", + maxCallAttempts: defaultMaxCallAttempts, + useProxy: true, + enableLocalDNSResolution: false, } } diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go index 1d827dd5d9d..ad75313a18e 100644 --- a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go +++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go @@ -23,6 +23,7 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" + "google.golang.org/grpc/stats" ) func init() { @@ -34,7 +35,7 @@ var logger = grpclog.Component("metrics-registry") // DefaultMetrics are the default metrics registered through global metrics // registry. This is written to at initialization time only, and is read only // after initialization. -var DefaultMetrics = NewMetrics() +var DefaultMetrics = stats.NewMetricSet() // MetricDescriptor is the data for a registered metric. type MetricDescriptor struct { @@ -42,7 +43,7 @@ type MetricDescriptor struct { // (including any per call metrics). See // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions // for metric naming conventions. - Name Metric + Name string // The description of this metric. Description string // The unit (e.g. entries, seconds) of this metric. @@ -154,27 +155,27 @@ func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels . } // registeredMetrics are the registered metric descriptor names. 
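The metric-registry migration underway here replaces the named Metric type with plain strings (Metric survives only as an alias). Registration keeps the same shape; a hedged sketch with a made-up metric name:

package demo

import (
	estats "google.golang.org/grpc/experimental/stats"
)

// Registered from a package-level var, since the registry is written to at
// init time only.
var exampleCalls = estats.RegisterInt64Count(estats.MetricDescriptor{
	Name:        "example.com/demo.calls", // now an ordinary string
	Description: "hypothetical demo counter",
	Unit:        "call",
	Default:     false,
})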
-var registeredMetrics = make(map[Metric]bool) +var registeredMetrics = make(map[string]bool) // metricsRegistry contains all of the registered metrics. // // This is written to only at init time, and read only after that. -var metricsRegistry = make(map[Metric]*MetricDescriptor) +var metricsRegistry = make(map[string]*MetricDescriptor) // DescriptorForMetric returns the MetricDescriptor from the global registry. // // Returns nil if MetricDescriptor not present. -func DescriptorForMetric(metric Metric) *MetricDescriptor { - return metricsRegistry[metric] +func DescriptorForMetric(metricName string) *MetricDescriptor { + return metricsRegistry[metricName] } -func registerMetric(name Metric, def bool) { - if registeredMetrics[name] { - logger.Fatalf("metric %v already registered", name) +func registerMetric(metricName string, def bool) { + if registeredMetrics[metricName] { + logger.Fatalf("metric %v already registered", metricName) } - registeredMetrics[name] = true + registeredMetrics[metricName] = true if def { - DefaultMetrics = DefaultMetrics.Add(name) + DefaultMetrics = DefaultMetrics.Add(metricName) } } @@ -256,8 +257,8 @@ func snapshotMetricsRegistryForTesting() func() { oldRegisteredMetrics := registeredMetrics oldMetricsRegistry := metricsRegistry - registeredMetrics = make(map[Metric]bool) - metricsRegistry = make(map[Metric]*MetricDescriptor) + registeredMetrics = make(map[string]bool) + metricsRegistry = make(map[string]*MetricDescriptor) maps.Copy(registeredMetrics, registeredMetrics) maps.Copy(metricsRegistry, metricsRegistry) diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go index 3221f7a633a..ee1423605ab 100644 --- a/vendor/google.golang.org/grpc/experimental/stats/metrics.go +++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go @@ -19,7 +19,7 @@ // Package stats contains experimental metrics/stats API's. package stats -import "maps" +import "google.golang.org/grpc/stats" // MetricsRecorder records on metrics derived from metric registry. type MetricsRecorder interface { @@ -40,75 +40,15 @@ type MetricsRecorder interface { RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string) } -// Metric is an identifier for a metric. -type Metric string +// Metrics is an experimental legacy alias of the now-stable stats.MetricSet. +// Metrics will be deleted in a future release. +type Metrics = stats.MetricSet -// Metrics is a set of metrics to record. Once created, Metrics is immutable, -// however Add and Remove can make copies with specific metrics added or -// removed, respectively. -// -// Do not construct directly; use NewMetrics instead. -type Metrics struct { - // metrics are the set of metrics to initialize. - metrics map[Metric]bool -} +// Metric was replaced by direct usage of strings. +type Metric = string -// NewMetrics returns a Metrics containing Metrics. +// NewMetrics is an experimental legacy alias of the now-stable +// stats.NewMetricSet. NewMetrics will be deleted in a future release. func NewMetrics(metrics ...Metric) *Metrics { - newMetrics := make(map[Metric]bool) - for _, metric := range metrics { - newMetrics[metric] = true - } - return &Metrics{ - metrics: newMetrics, - } -} - -// Metrics returns the metrics set. The returned map is read-only and must not -// be modified. -func (m *Metrics) Metrics() map[Metric]bool { - return m.metrics -} - -// Add adds the metrics to the metrics set and returns a new copy with the -// additional metrics. 
-func (m *Metrics) Add(metrics ...Metric) *Metrics { - newMetrics := make(map[Metric]bool) - for metric := range m.metrics { - newMetrics[metric] = true - } - - for _, metric := range metrics { - newMetrics[metric] = true - } - return &Metrics{ - metrics: newMetrics, - } -} - -// Join joins the metrics passed in with the metrics set, and returns a new copy -// with the merged metrics. -func (m *Metrics) Join(metrics *Metrics) *Metrics { - newMetrics := make(map[Metric]bool) - maps.Copy(newMetrics, m.metrics) - maps.Copy(newMetrics, metrics.metrics) - return &Metrics{ - metrics: newMetrics, - } -} - -// Remove removes the metrics from the metrics set and returns a new copy with -// the metrics removed. -func (m *Metrics) Remove(metrics ...Metric) *Metrics { - newMetrics := make(map[Metric]bool) - for metric := range m.metrics { - newMetrics[metric] = true - } - - for _, metric := range metrics { - delete(newMetrics, metric) - } - return &Metrics{ - metrics: newMetrics, - } + return stats.NewMetricSet(metrics...) } diff --git a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go index 07df71e98a8..ed90060c3cb 100644 --- a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go @@ -101,6 +101,22 @@ var severityName = []string{ fatalLog: "FATAL", } +// sprintf is fmt.Sprintf. +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. +var sprintf = fmt.Sprintf + +// sprint is fmt.Sprint. +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. +var sprint = fmt.Sprint + +// sprintln is fmt.Sprintln. +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. +var sprintln = fmt.Sprintln + +// exit is os.Exit. +// This var exists to make it possible to test functions calling os.Exit. +var exit = os.Exit + // loggerT is the default logger used by grpclog. type loggerT struct { m []*log.Logger @@ -111,7 +127,7 @@ type loggerT struct { func (g *loggerT) output(severity int, s string) { sevStr := severityName[severity] if !g.jsonFormat { - g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + g.m[severity].Output(2, sevStr+": "+s) return } // TODO: we can also include the logging component, but that needs more @@ -123,55 +139,79 @@ func (g *loggerT) output(severity int, s string) { g.m[severity].Output(2, string(b)) } +func (g *loggerT) printf(severity int, format string, args ...any) { + // Note the discard check is duplicated in each print func, rather than in + // output, to avoid the expensive Sprint calls. + // De-duplicating this by moving to output would be a significant performance regression! + if lg := g.m[severity]; lg.Writer() == io.Discard { + return + } + g.output(severity, sprintf(format, args...)) +} + +func (g *loggerT) print(severity int, v ...any) { + if lg := g.m[severity]; lg.Writer() == io.Discard { + return + } + g.output(severity, sprint(v...)) +} + +func (g *loggerT) println(severity int, v ...any) { + if lg := g.m[severity]; lg.Writer() == io.Discard { + return + } + g.output(severity, sprintln(v...)) +} + func (g *loggerT) Info(args ...any) { - g.output(infoLog, fmt.Sprint(args...)) + g.print(infoLog, args...) } func (g *loggerT) Infoln(args ...any) { - g.output(infoLog, fmt.Sprintln(args...)) + g.println(infoLog, args...) 
} func (g *loggerT) Infof(format string, args ...any) { - g.output(infoLog, fmt.Sprintf(format, args...)) + g.printf(infoLog, format, args...) } func (g *loggerT) Warning(args ...any) { - g.output(warningLog, fmt.Sprint(args...)) + g.print(warningLog, args...) } func (g *loggerT) Warningln(args ...any) { - g.output(warningLog, fmt.Sprintln(args...)) + g.println(warningLog, args...) } func (g *loggerT) Warningf(format string, args ...any) { - g.output(warningLog, fmt.Sprintf(format, args...)) + g.printf(warningLog, format, args...) } func (g *loggerT) Error(args ...any) { - g.output(errorLog, fmt.Sprint(args...)) + g.print(errorLog, args...) } func (g *loggerT) Errorln(args ...any) { - g.output(errorLog, fmt.Sprintln(args...)) + g.println(errorLog, args...) } func (g *loggerT) Errorf(format string, args ...any) { - g.output(errorLog, fmt.Sprintf(format, args...)) + g.printf(errorLog, format, args...) } func (g *loggerT) Fatal(args ...any) { - g.output(fatalLog, fmt.Sprint(args...)) - os.Exit(1) + g.print(fatalLog, args...) + exit(1) } func (g *loggerT) Fatalln(args ...any) { - g.output(fatalLog, fmt.Sprintln(args...)) - os.Exit(1) + g.println(fatalLog, args...) + exit(1) } func (g *loggerT) Fatalf(format string, args ...any) { - g.output(fatalLog, fmt.Sprintf(format, args...)) - os.Exit(1) + g.printf(fatalLog, format, args...) + exit(1) } func (g *loggerT) V(l int) bool { @@ -186,19 +226,42 @@ type LoggerV2Config struct { FormatJSON bool } +// combineLoggers returns a combined logger for both higher & lower severity logs, +// or only one if the other is io.Discard. +// +// This uses io.Discard instead of io.MultiWriter when all loggers +// are set to io.Discard. Both this package and the standard log package have +// significant optimizations for io.Discard, which io.MultiWriter lacks (as of +// this writing). +func combineLoggers(lower, higher io.Writer) io.Writer { + if lower == io.Discard { + return higher + } + if higher == io.Discard { + return lower + } + return io.MultiWriter(lower, higher) +} + // NewLoggerV2 creates a new LoggerV2 instance with the provided configuration. // The infoW, warningW, and errorW writers are used to write log messages of // different severity levels. func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 { - var m []*log.Logger flag := log.LstdFlags if c.FormatJSON { flag = 0 } - m = append(m, log.New(infoW, "", flag)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) - ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, "", flag)) - m = append(m, log.New(ew, "", flag)) + + warningW = combineLoggers(infoW, warningW) + errorW = combineLoggers(errorW, warningW) + + fatalW := errorW + + m := []*log.Logger{ + log.New(infoW, "", flag), + log.New(warningW, "", flag), + log.New(errorW, "", flag), + log.New(fatalW, "", flag), + } return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON} } diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index d92335445f6..94177b05c2d 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
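The logger rework above is a performance fix: each print helper returns early when the severity's writer is io.Discard, and combineLoggers keeps io.Discard out of io.MultiWriter, so disabled levels skip the fmt calls entirely. Roughly how a caller benefits, sketched with arbitrary writers:

package main

import (
	"io"
	"os"

	"google.golang.org/grpc/grpclog"
)

func main() {
	// Info and warning go nowhere; with the change above their formatting
	// work is skipped rather than rendered and thrown away.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2(io.Discard, io.Discard, os.Stderr))
}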
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.36.4 // protoc v5.27.1 // source: grpc/health/v1/health.proto @@ -28,6 +28,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -90,20 +91,17 @@ func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { } type HealthCheckRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` unknownFields protoimpl.UnknownFields - - Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + sizeCache protoimpl.SizeCache } func (x *HealthCheckRequest) Reset() { *x = HealthCheckRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_health_v1_health_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_health_v1_health_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HealthCheckRequest) String() string { @@ -114,7 +112,7 @@ func (*HealthCheckRequest) ProtoMessage() {} func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_health_v1_health_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -137,20 +135,17 @@ func (x *HealthCheckRequest) GetService() string { } type HealthCheckResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` unknownFields protoimpl.UnknownFields - - Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` + sizeCache protoimpl.SizeCache } func (x *HealthCheckResponse) Reset() { *x = HealthCheckResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_health_v1_health_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_health_v1_health_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HealthCheckResponse) String() string { @@ -161,7 +156,7 @@ func (*HealthCheckResponse) ProtoMessage() {} func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_health_v1_health_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -185,7 +180,7 @@ func (x *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { var File_grpc_health_v1_health_proto protoreflect.FileDescriptor -var file_grpc_health_v1_health_proto_rawDesc = []byte{ +var file_grpc_health_v1_health_proto_rawDesc = string([]byte{ 0x0a, 0x1b, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x76, 0x31, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x22, 0x2e, 0x0a, @@ -214,23 +209,24 @@ var 
file_grpc_health_v1_health_proto_rawDesc = []byte{ 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x30, 0x01, 0x42, 0x61, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, + 0x30, 0x01, 0x42, 0x70, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x5f, 0x76, 0x31, 0xaa, 0x02, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} + 0x68, 0x5f, 0x76, 0x31, 0xa2, 0x02, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x56, 0x31, 0xaa, 0x02, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) var ( file_grpc_health_v1_health_proto_rawDescOnce sync.Once - file_grpc_health_v1_health_proto_rawDescData = file_grpc_health_v1_health_proto_rawDesc + file_grpc_health_v1_health_proto_rawDescData []byte ) func file_grpc_health_v1_health_proto_rawDescGZIP() []byte { file_grpc_health_v1_health_proto_rawDescOnce.Do(func() { - file_grpc_health_v1_health_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_health_v1_health_proto_rawDescData) + file_grpc_health_v1_health_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_health_v1_health_proto_rawDesc), len(file_grpc_health_v1_health_proto_rawDesc))) }) return file_grpc_health_v1_health_proto_rawDescData } @@ -260,37 +256,11 @@ func file_grpc_health_v1_health_proto_init() { if File_grpc_health_v1_health_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*HealthCheckRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*HealthCheckResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_health_v1_health_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_health_v1_health_proto_rawDesc), len(file_grpc_health_v1_health_proto_rawDesc)), NumEnums: 1, NumMessages: 2, NumExtensions: 0, @@ -302,7 +272,6 @@ func file_grpc_health_v1_health_proto_init() { MessageInfos: file_grpc_health_v1_health_proto_msgTypes, }.Build() File_grpc_health_v1_health_proto = out.File - file_grpc_health_v1_health_proto_rawDesc = nil file_grpc_health_v1_health_proto_goTypes = nil file_grpc_health_v1_health_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/grpc/health/producer.go b/vendor/google.golang.org/grpc/health/producer.go new file 
mode 100644 index 00000000000..f938e5790c7 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/producer.go @@ -0,0 +1,106 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package health + +import ( + "context" + "sync" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/status" +) + +func init() { + producerBuilderSingleton = &producerBuilder{} + internal.RegisterClientHealthCheckListener = registerClientSideHealthCheckListener +} + +type producerBuilder struct{} + +var producerBuilderSingleton *producerBuilder + +// Build constructs and returns a producer and its cleanup function. +func (*producerBuilder) Build(cci any) (balancer.Producer, func()) { + p := &healthServiceProducer{ + cc: cci.(grpc.ClientConnInterface), + cancel: func() {}, + } + return p, func() { + p.mu.Lock() + defer p.mu.Unlock() + p.cancel() + } +} + +type healthServiceProducer struct { + // The following fields are initialized at build time and read-only after + // that and therefore do not need to be guarded by a mutex. + cc grpc.ClientConnInterface + + mu sync.Mutex + cancel func() +} + +// registerClientSideHealthCheckListener accepts a listener to provide server +// health state via the health service. +func registerClientSideHealthCheckListener(ctx context.Context, sc balancer.SubConn, serviceName string, listener func(balancer.SubConnState)) func() { + pr, closeFn := sc.GetOrBuildProducer(producerBuilderSingleton) + p := pr.(*healthServiceProducer) + p.mu.Lock() + defer p.mu.Unlock() + p.cancel() + if listener == nil { + return closeFn + } + + ctx, cancel := context.WithCancel(ctx) + p.cancel = cancel + + go p.startHealthCheck(ctx, sc, serviceName, listener) + return closeFn +} + +func (p *healthServiceProducer) startHealthCheck(ctx context.Context, sc balancer.SubConn, serviceName string, listener func(balancer.SubConnState)) { + newStream := func(method string) (any, error) { + return p.cc.NewStream(ctx, &grpc.StreamDesc{ServerStreams: true}, method) + } + + setConnectivityState := func(state connectivity.State, err error) { + listener(balancer.SubConnState{ + ConnectivityState: state, + ConnectionError: err, + }) + } + + // Call the function through the internal variable as tests use it for + // mocking. 
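A hedged skeleton of the balancer.ProducerBuilder contract the health producer above implements; all names here are hypothetical:

package demo

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer"
)

type demoProducer struct {
	cc grpc.ClientConnInterface // the SubConn's connection, usable for RPCs
}

type demoBuilder struct{}

// Build receives the SubConn's ClientConnInterface and returns the producer
// plus a cleanup func that runs when the last reference handed out by
// GetOrBuildProducer is released.
func (*demoBuilder) Build(cci any) (balancer.Producer, func()) {
	p := &demoProducer{cc: cci.(grpc.ClientConnInterface)}
	return p, func() { /* stop any background work here */ }
}

var _ balancer.ProducerBuilder = (*demoBuilder)(nil)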
+ err := internal.HealthCheckFunc(ctx, newStream, setConnectivityState, serviceName) + if err == nil { + return + } + if status.Code(err) == codes.Unimplemented { + logger.Errorf("Subchannel health check is unimplemented at server side, thus health check is disabled for SubConn %p", sc) + } else { + logger.Errorf("Health checking failed for SubConn %p: %v", sc, err) + } +} diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index b15cf482d29..b6ae7f25850 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -25,7 +25,7 @@ package backoff import ( "context" "errors" - "math/rand" + rand "math/rand/v2" "time" grpcbackoff "google.golang.org/grpc/backoff" diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go index 73bb4c4ee9a..fbc1ca356ab 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -109,8 +109,9 @@ func (gsb *Balancer) switchTo(builder balancer.Builder) (*balancerWrapper, error return nil, errBalancerClosed } bw := &balancerWrapper{ - builder: builder, - gsb: gsb, + ClientConn: gsb.cc, + builder: builder, + gsb: gsb, lastState: balancer.State{ ConnectivityState: connectivity.Connecting, Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), @@ -293,6 +294,7 @@ func (gsb *Balancer) Close() { // State updates from the wrapped balancer can result in invocation of the // graceful switch logic. type balancerWrapper struct { + balancer.ClientConn balancer.Balancer gsb *Balancer builder balancer.Builder @@ -413,7 +415,3 @@ func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver bw.gsb.mu.Unlock() bw.gsb.cc.UpdateAddresses(sc, addrs) } - -func (bw *balancerWrapper) Target() string { - return bw.gsb.cc.Target() -} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 6e7dd6b7727..1e42b6fdc87 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -49,7 +49,7 @@ var ( // XDSFallbackSupport is the env variable that controls whether support for // xDS fallback is turned on. If this is unset or is false, only the first // xDS server in the list of server configs will be used. - XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false) + XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", true) // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used // instead of the exiting pickfirst implementation. This can be enabled by // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST" diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 29f234acb1b..2eb97f832b1 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -53,4 +53,14 @@ var ( // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. 
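The gracefulswitch hunk above swaps a hand-written Target() forwarder for an embedded balancer.ClientConn, so unimplemented methods fall through automatically. A self-contained sketch of that embedding idiom, outside the patch:

package main

import "fmt"

type Conn interface {
	Target() string
	Close() error
}

type realConn struct{}

func (realConn) Target() string { return "dns:///example" }
func (realConn) Close() error   { return nil }

// wrapper overrides nothing; every Conn method is forwarded to the embedded
// value, and Conn methods added later are forwarded too.
type wrapper struct {
	Conn
}

func main() {
	w := wrapper{Conn: realConn{}}
	fmt.Println(w.Target())
}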
C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") + + // XDSDualstackEndpointsEnabled is true if gRPC should read the + // "additional addresses" in the xDS endpoint resource. + XDSDualstackEndpointsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_DUALSTACK_ENDPOINTS", true) + + // XDSSystemRootCertsEnabled is true when xDS enabled gRPC clients can use + // the system's default root certificates for TLS certificate validation. + // For more details, see: + // https://github.com/grpc/proposal/blob/master/A82-xds-system-root-certs.md. + XDSSystemRootCertsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_SYSTEM_ROOT_CERTS", false) ) diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go deleted file mode 100644 index 6635f7bca96..00000000000 --- a/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go +++ /dev/null @@ -1,32 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpcsync - -import ( - "sync" -) - -// OnceFunc returns a function wrapping f which ensures f is only executed -// once even if the returned function is executed multiple times. -func OnceFunc(f func()) func() { - var once sync.Once - return func() { - once.Do(f) - } -} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 20b4dc3d353..13e1f386b1c 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -29,10 +29,12 @@ import ( ) var ( - // WithHealthCheckFunc is set by dialoptions.go - WithHealthCheckFunc any // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker + // RegisterClientHealthCheckListener is used to provide a listener for + // updates from the client-side health checking service. It returns a + // function that can be called to stop the health producer. + RegisterClientHealthCheckListener any // func(ctx context.Context, sc balancer.SubConn, serviceName string, listener func(balancer.SubConnState)) func() // BalancerUnregister is exported by package balancer to unregister a balancer. BalancerUnregister func(name string) // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by @@ -62,6 +64,9 @@ var ( // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials + // MetricsRecorderForServer returns the MetricsRecorderList derived from a + // server's stats handlers. + MetricsRecorderForServer any // func (*grpc.Server) estats.MetricsRecorder // CanonicalString returns the canonical string of the code defined here: // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. 
// @@ -149,6 +154,34 @@ var ( // other features, including the CSDS service. NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) + // NewXDSResolverWithPoolForTesting creates a new xDS resolver builder + // using the provided xDS pool instead of creating a new one using the + // bootstrap configuration specified by the supported environment variables. + // The resolver.Builder is meant to be used in conjunction with the + // grpc.WithResolvers DialOption. The resolver.Builder does not take + // ownership of the provided xDS client and it is the responsibility of the + // caller to close the client when no longer required. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithPoolForTesting any // func(*xdsclient.Pool) (resolver.Builder, error) + + // NewXDSResolverWithClientForTesting creates a new xDS resolver builder + // using the provided xDS client instead of creating a new one using the + // bootstrap configuration specified by the supported environment variables. + // The resolver.Builder is meant to be used in conjunction with the + // grpc.WithResolvers DialOption. The resolver.Builder does not take + // ownership of the provided xDS client and it is the responsibility of the + // caller to close the client when no longer required. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error) + // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster // Specifier Plugin for testing purposes, regardless of the XDSRLS environment // variable. @@ -255,3 +288,15 @@ const ( // It currently has an experimental suffix which would be removed once // end-to-end testing of the policy is completed. const RLSLoadBalancingPolicyName = "rls_experimental" + +// EnforceSubConnEmbedding is used to enforce proper SubConn implementation +// embedding. +type EnforceSubConnEmbedding interface { + enforceSubConnEmbedding() +} + +// EnforceClientConnEmbedding is used to enforce proper ClientConn implementation +// embedding. +type EnforceClientConnEmbedding interface { + enforceClientConnEmbedding() +} diff --git a/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go b/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go new file mode 100644 index 00000000000..1f61f1a49d7 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go @@ -0,0 +1,54 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package proxyattributes contains functions for getting and setting proxy +// attributes like the CONNECT address and user info. 
+package proxyattributes
+
+import (
+	"net/url"
+
+	"google.golang.org/grpc/resolver"
+)
+
+type keyType string
+
+const proxyOptionsKey = keyType("grpc.resolver.delegatingresolver.proxyOptions")
+
+// Options holds the proxy connection details needed during the CONNECT
+// handshake.
+type Options struct {
+	User        *url.Userinfo
+	ConnectAddr string
+}
+
+// Set returns a copy of addr with opts set in its attributes.
+func Set(addr resolver.Address, opts Options) resolver.Address {
+	addr.Attributes = addr.Attributes.WithValue(proxyOptionsKey, opts)
+	return addr
+}
+
+// Get returns the Options for the proxy [resolver.Address] and a boolean
+// value representing if the attribute is present or not. The returned data
+// should not be mutated.
+func Get(addr resolver.Address) (Options, bool) {
+	if a := addr.Attributes.Value(proxyOptionsKey); a != nil {
+		return a.(Options), true
+	}
+	return Options{}, false
+}
diff --git a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
new file mode 100644
index 00000000000..7b93f692be0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
@@ -0,0 +1,353 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package delegatingresolver implements a resolver capable of resolving both
+// target URIs and proxy addresses.
+package delegatingresolver
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"sync"
+
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/proxyattributes"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+var (
+	logger = grpclog.Component("delegating-resolver")
+	// HTTPSProxyFromEnvironment will be overwritten in the tests
+	HTTPSProxyFromEnvironment = http.ProxyFromEnvironment
+)
+
+// delegatingResolver manages both target URI and proxy address resolution by
+// delegating these tasks to separate child resolvers. Essentially, it acts as
+// an intermediary between the gRPC ClientConn and the child resolvers.
+//
+// It implements the [resolver.Resolver] interface.
+type delegatingResolver struct {
+	target   resolver.Target     // parsed target URI to be resolved
+	cc       resolver.ClientConn // gRPC ClientConn
+	proxyURL *url.URL            // proxy URL, derived from proxy environment and target
+
+	mu                  sync.Mutex         // protects all the fields below
+	targetResolverState *resolver.State    // state of the target resolver
+	proxyAddrs          []resolver.Address // resolved proxy addresses; empty if no proxy is configured
+
+	// childMu serializes calls into child resolvers. It also protects access to
+	// the following fields.
+	childMu        sync.Mutex
+	targetResolver resolver.Resolver // resolver for the target URI, based on its scheme
+	proxyResolver  resolver.Resolver // resolver for the proxy URI; nil if no proxy is configured
+}
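
For orientation, the proxyattributes helpers above attach the CONNECT details to a resolver.Address without touching its Addr field. A usage sketch; note that proxyattributes is an internal package, so code like this only compiles inside the grpc module itself:

    package main

    import (
        "fmt"
        "net/url"

        "google.golang.org/grpc/internal/proxyattributes"
        "google.golang.org/grpc/resolver"
    )

    func main() {
        proxy := resolver.Address{Addr: "proxy.example.com:3128"}
        // Record which backend the CONNECT request should ask the proxy for.
        proxy = proxyattributes.Set(proxy, proxyattributes.Options{
            User:        url.User("alice"),
            ConnectAddr: "backend.example.com:443",
        })
        if opts, ok := proxyattributes.Get(proxy); ok {
            fmt.Println(opts.ConnectAddr) // backend.example.com:443
        }
    }
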
+// nopResolver is a resolver that does nothing.
+type nopResolver struct{}
+
+func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
+
+func (nopResolver) Close() {}
+
+// proxyURLForTarget determines the proxy URL for the given address based on
+// the environment. It can return the following:
+//   - nil URL, nil error: No proxy is configured or the address is excluded
+//     using the `NO_PROXY` environment variable or if req.URL.Host is
+//     "localhost" (with or without a port number)
+//   - nil URL, non-nil error: An error occurred while retrieving the proxy URL.
+//   - non-nil URL, nil error: A proxy is configured, and the proxy URL was
+//     retrieved successfully without any errors.
+func proxyURLForTarget(address string) (*url.URL, error) {
+	req := &http.Request{URL: &url.URL{
+		Scheme: "https",
+		Host:   address,
+	}}
+	return HTTPSProxyFromEnvironment(req)
+}
+
+// New creates a new delegating resolver that can create up to two child
+// resolvers:
+//   - one to resolve the proxy address specified using the supported
+//     environment variables. This uses the registered resolver for the "dns"
+//     scheme.
+//   - one to resolve the target URI using the resolver specified by the scheme
+//     in the target URI or specified by the user using the WithResolvers dial
+//     option. As a special case, if the target URI's scheme is "dns" and a
+//     proxy is specified using the supported environment variables, the target
+//     URI's path portion is used as the resolved address unless target
+//     resolution is enabled using the dial option.
+func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions, targetResolverBuilder resolver.Builder, targetResolutionEnabled bool) (resolver.Resolver, error) {
+	r := &delegatingResolver{
+		target: target,
+		cc:     cc,
+	}
+
+	var err error
+	r.proxyURL, err = proxyURLForTarget(target.Endpoint())
+	if err != nil {
+		return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %s: %v", target, err)
+	}
+
+	// proxy is not configured or proxy address excluded using `NO_PROXY` env
+	// var, so only target resolver is used.
+	if r.proxyURL == nil {
+		return targetResolverBuilder.Build(target, cc, opts)
+	}
+
+	if logger.V(2) {
+		logger.Infof("Proxy URL detected : %s", r.proxyURL)
+	}
+
+	// Resolver updates from one child may trigger calls into the other. Block
+	// updates until the children are initialized.
+	r.childMu.Lock()
+	defer r.childMu.Unlock()
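
proxyURLForTarget defers entirely to http.ProxyFromEnvironment, so the usual HTTPS_PROXY/NO_PROXY rules decide whether a proxy applies. A standalone way to observe those rules with only the standard library (the hosts are made up for illustration):

    package main

    import (
        "fmt"
        "net/http"
        "net/url"
        "os"
    )

    func main() {
        os.Setenv("HTTPS_PROXY", "http://proxy.example.com:3128")
        os.Setenv("NO_PROXY", "internal.example.com")

        for _, host := range []string{"backend.example.com:443", "internal.example.com:443"} {
            req := &http.Request{URL: &url.URL{Scheme: "https", Host: host}}
            u, err := http.ProxyFromEnvironment(req)
            fmt.Println(host, "->", u, err) // the NO_PROXY host yields a nil proxy URL
        }
    }
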
+	// When the scheme is 'dns' and target resolution on client is not enabled,
+	// resolution should be handled by the proxy, not the client. Therefore, we
+	// bypass the target resolver and store the unresolved target address.
+	if target.URL.Scheme == "dns" && !targetResolutionEnabled {
+		state := resolver.State{
+			Addresses: []resolver.Address{{Addr: target.Endpoint()}},
+			Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: target.Endpoint()}}}},
+		}
+		r.targetResolverState = &state
+	} else {
+		wcc := &wrappingClientConn{
+			stateListener: r.updateTargetResolverState,
+			parent:        r,
+		}
+		if r.targetResolver, err = targetResolverBuilder.Build(target, wcc, opts); err != nil {
+			return nil, fmt.Errorf("delegating_resolver: unable to build the resolver for target %s: %v", target, err)
+		}
+	}
+
+	if r.proxyResolver, err = r.proxyURIResolver(opts); err != nil {
+		return nil, fmt.Errorf("delegating_resolver: failed to build resolver for proxy URL %q: %v", r.proxyURL, err)
+	}
+
+	if r.targetResolver == nil {
+		r.targetResolver = nopResolver{}
+	}
+	if r.proxyResolver == nil {
+		r.proxyResolver = nopResolver{}
+	}
+	return r, nil
+}
+
+// proxyURIResolver creates a resolver for resolving proxy URIs using the
+// "dns" scheme. It adjusts the proxyURL to conform to the "dns:///" format and
+// builds a resolver with a wrappingClientConn to capture resolved addresses.
+func (r *delegatingResolver) proxyURIResolver(opts resolver.BuildOptions) (resolver.Resolver, error) {
+	proxyBuilder := resolver.Get("dns")
+	if proxyBuilder == nil {
+		panic("delegating_resolver: resolver for proxy not found for scheme dns")
+	}
+	url := *r.proxyURL
+	url.Scheme = "dns"
+	url.Path = "/" + r.proxyURL.Host
+	url.Host = "" // Clear the Host field to conform to the "dns:///" format
+
+	proxyTarget := resolver.Target{URL: url}
+	wcc := &wrappingClientConn{
+		stateListener: r.updateProxyResolverState,
+		parent:        r,
+	}
+	return proxyBuilder.Build(proxyTarget, wcc, opts)
+}
+
+func (r *delegatingResolver) ResolveNow(o resolver.ResolveNowOptions) {
+	r.childMu.Lock()
+	defer r.childMu.Unlock()
+	r.targetResolver.ResolveNow(o)
+	r.proxyResolver.ResolveNow(o)
+}
+
+func (r *delegatingResolver) Close() {
+	r.childMu.Lock()
+	defer r.childMu.Unlock()
+	r.targetResolver.Close()
+	r.targetResolver = nil
+
+	r.proxyResolver.Close()
+	r.proxyResolver = nil
+}
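
updateClientConnStateLocked, defined just below, follows a publish-only-when-both-ready discipline: nothing is sent to the ClientConn until the target and proxy resolvers have each reported at least once, and what is published is the cross product of the two result sets. A minimal sketch of that discipline with plain strings (illustrative names, not the resolver API):

    package main

    import "fmt"

    // combiner publishes only once both sides have reported, mirroring the
    // nil-until-ready behavior of updateClientConnStateLocked.
    type combiner struct {
        target, proxy       []string
        targetSet, proxySet bool
    }

    func (c *combiner) maybePublish() {
        if !c.targetSet || !c.proxySet {
            return // one side has not reported yet; do not publish a partial view
        }
        for _, p := range c.proxy {
            for _, t := range c.target {
                fmt.Printf("dial %s, CONNECT to %s\n", p, t)
            }
        }
    }

    func main() {
        c := &combiner{}
        c.maybePublish() // no output: neither side ready
        c.target, c.targetSet = []string{"10.0.0.1:443", "10.0.0.2:443"}, true
        c.proxy, c.proxySet = []string{"proxy:3128"}, true
        c.maybePublish() // publishes the full cross product
    }
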
+// updateClientConnStateLocked creates a list of combined addresses by
+// pairing each proxy address with every target address. For each pair, it
+// generates a new [resolver.Address] using the proxy address, adding the
+// target address as an attribute along with user info. It returns nil if
+// either resolver has not yet sent an update, and returns the error from the
+// ClientConn update once both resolvers have sent an update at least once.
+func (r *delegatingResolver) updateClientConnStateLocked() error {
+	if r.targetResolverState == nil || r.proxyAddrs == nil {
+		return nil
+	}
+
+	curState := *r.targetResolverState
+	// If multiple resolved proxy addresses are present, we send only the
+	// unresolved proxy host and let net.Dial handle the proxy host name
+	// resolution when creating the transport. Sending all resolved addresses
+	// would increase the number of addresses passed to the ClientConn and
+	// subsequently to load balancing (LB) policies like Round Robin, leading
+	// to additional TCP connections. However, if there's only one resolved
+	// proxy address, we send it directly, as it doesn't affect the address
+	// count returned by the target resolver and the address count sent to the
+	// ClientConn.
+	var proxyAddr resolver.Address
+	if len(r.proxyAddrs) == 1 {
+		proxyAddr = r.proxyAddrs[0]
+	} else {
+		proxyAddr = resolver.Address{Addr: r.proxyURL.Host}
+	}
+	var addresses []resolver.Address
+	for _, targetAddr := range (*r.targetResolverState).Addresses {
+		addresses = append(addresses, proxyattributes.Set(proxyAddr, proxyattributes.Options{
+			User:        r.proxyURL.User,
+			ConnectAddr: targetAddr.Addr,
+		}))
+	}
+
+	// Create a list of combined endpoints by pairing all proxy endpoints
+	// with every target endpoint. For each pair, it constructs a new
+	// [resolver.Endpoint] using all the addresses from the proxy endpoints
+	// and the target addresses from one target endpoint. The target address
+	// and user information from the proxy URL are added as attributes to the
+	// proxy address. The resulting list of addresses is then grouped into
+	// endpoints, covering all combinations of proxy and target endpoints.
+	var endpoints []resolver.Endpoint
+	for _, endpt := range (*r.targetResolverState).Endpoints {
+		var addrs []resolver.Address
+		for _, proxyAddr := range r.proxyAddrs {
+			for _, targetAddr := range endpt.Addresses {
+				addrs = append(addrs, proxyattributes.Set(proxyAddr, proxyattributes.Options{
+					User:        r.proxyURL.User,
+					ConnectAddr: targetAddr.Addr,
+				}))
+			}
+		}
+		endpoints = append(endpoints, resolver.Endpoint{Addresses: addrs})
+	}
+	// Use the targetResolverState for its service config and attributes
+	// contents. The state update is only sent after both the target and proxy
+	// resolvers have sent their updates, and curState has been updated with
+	// the combined addresses.
+	curState.Addresses = addresses
+	curState.Endpoints = endpoints
+	return r.cc.UpdateState(curState)
+}
+
+// updateProxyResolverState updates the proxy resolver state by storing proxy
+// addresses and endpoints, marking the resolver as ready, and triggering a
+// state update if both proxy and target resolvers are ready. If the ClientConn
+// returns a non-nil error, it calls `ResolveNow()` on the target resolver. It
+// is a StateListener function of wrappingClientConn passed to the proxy resolver.
+func (r *delegatingResolver) updateProxyResolverState(state resolver.State) error {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if logger.V(2) {
+		logger.Infof("Addresses received from proxy resolver: %s", state.Addresses)
+	}
+	if len(state.Endpoints) > 0 {
+		// We expect exactly one address per endpoint because the proxy
+		// resolver uses "dns" resolution.
+		r.proxyAddrs = make([]resolver.Address, 0, len(state.Endpoints))
+		for _, endpoint := range state.Endpoints {
+			r.proxyAddrs = append(r.proxyAddrs, endpoint.Addresses...)
+		}
+	} else if state.Addresses != nil {
+		r.proxyAddrs = state.Addresses
+	} else {
+		r.proxyAddrs = []resolver.Address{} // ensure proxyAddrs is non-nil to indicate an update has been received
+	}
+	err := r.updateClientConnStateLocked()
+	// Another possible approach was to block until updates are received from
+	// both resolvers. But this is not used because calling `New()` triggers
+	// `Build()` for the first resolver, which calls `UpdateState()`. And the
+	// second resolver hasn't sent an update yet, so it would cause `New()` to
+	// block indefinitely.
+ if err != nil { + go func() { + r.childMu.Lock() + defer r.childMu.Unlock() + if r.targetResolver != nil { + r.targetResolver.ResolveNow(resolver.ResolveNowOptions{}) + } + }() + } + return err +} + +// updateTargetResolverState updates the target resolver state by storing target +// addresses, endpoints, and service config, marking the resolver as ready, and +// triggering a state update if both resolvers are ready. If the ClientConn +// returns a non-nil error, it calls `ResolveNow()` on the proxy resolver. It +// is a StateListener function of wrappingClientConn passed to the target resolver. +func (r *delegatingResolver) updateTargetResolverState(state resolver.State) error { + r.mu.Lock() + defer r.mu.Unlock() + + if logger.V(2) { + logger.Infof("Addresses received from target resolver: %v", state.Addresses) + } + r.targetResolverState = &state + err := r.updateClientConnStateLocked() + if err != nil { + go func() { + r.childMu.Lock() + defer r.childMu.Unlock() + if r.proxyResolver != nil { + r.proxyResolver.ResolveNow(resolver.ResolveNowOptions{}) + } + }() + } + return nil +} + +// wrappingClientConn serves as an intermediary between the parent ClientConn +// and the child resolvers created here. It implements the resolver.ClientConn +// interface and is passed in that capacity to the child resolvers. +type wrappingClientConn struct { + // Callback to deliver resolver state updates + stateListener func(state resolver.State) error + parent *delegatingResolver +} + +// UpdateState receives resolver state updates and forwards them to the +// appropriate listener function (either for the proxy or target resolver). +func (wcc *wrappingClientConn) UpdateState(state resolver.State) error { + return wcc.stateListener(state) +} + +// ReportError intercepts errors from the child resolvers and passes them to ClientConn. +func (wcc *wrappingClientConn) ReportError(err error) { + wcc.parent.cc.ReportError(err) +} + +// NewAddress intercepts the new resolved address from the child resolvers and +// passes them to ClientConn. +func (wcc *wrappingClientConn) NewAddress(addrs []resolver.Address) { + wcc.UpdateState(resolver.State{Addresses: addrs}) +} + +// ParseServiceConfig parses the provided service config and returns an +// object that provides the parsed config. +func (wcc *wrappingClientConn) ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult { + return wcc.parent.cc.ParseServiceConfig(serviceConfigJSON) +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 8691698ef22..ba5c5a95d0d 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -24,8 +24,9 @@ import ( "context" "encoding/json" "fmt" - "math/rand" + rand "math/rand/v2" "net" + "net/netip" "os" "strconv" "strings" @@ -122,7 +123,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts } // IP address. - if ipAddr, ok := formatIP(host); ok { + if ipAddr, err := formatIP(host); err == nil { addr := []resolver.Address{{Addr: ipAddr + ":" + port}} cc.UpdateState(resolver.State{Addresses: addr}) return deadResolver{}, nil @@ -237,7 +238,9 @@ func (d *dnsResolver) watcher() { } func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) { - if !EnableSRVLookups { + // Skip this particular host to avoid timeouts with some versions of + // systemd-resolved. 
+ if !EnableSRVLookups || d.host == "metadata.google.internal." { return nil, nil } var newAddrs []resolver.Address @@ -258,9 +261,9 @@ func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) return nil, err } for _, a := range lbAddrs { - ip, ok := formatIP(a) - if !ok { - return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + ip, err := formatIP(a) + if err != nil { + return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err) } addr := ip + ":" + strconv.Itoa(int(s.Port)) newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target}) @@ -320,9 +323,9 @@ func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error } newAddrs := make([]resolver.Address, 0, len(addrs)) for _, a := range addrs { - ip, ok := formatIP(a) - if !ok { - return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + ip, err := formatIP(a) + if err != nil { + return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err) } addr := ip + ":" + d.port newAddrs = append(newAddrs, resolver.Address{Addr: addr}) @@ -349,19 +352,19 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { return &state, nil } -// formatIP returns ok = false if addr is not a valid textual representation of -// an IP address. If addr is an IPv4 address, return the addr and ok = true. +// formatIP returns an error if addr is not a valid textual representation of +// an IP address. If addr is an IPv4 address, return the addr and error = nil. // If addr is an IPv6 address, return the addr enclosed in square brackets and -// ok = true. -func formatIP(addr string) (addrIP string, ok bool) { - ip := net.ParseIP(addr) - if ip == nil { - return "", false +// error = nil. +func formatIP(addr string) (string, error) { + ip, err := netip.ParseAddr(addr) + if err != nil { + return "", err } - if ip.To4() != nil { - return addr, true + if ip.Is4() { + return addr, nil } - return "[" + addr + "]", true + return "[" + addr + "]", nil } // parseTarget takes the user input target string and default port, returns @@ -377,7 +380,7 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { if target == "" { return "", "", internal.ErrMissingAddr } - if ip := net.ParseIP(target); ip != nil { + if _, err := netip.ParseAddr(target); err == nil { // target is an IPv4 or IPv6(without brackets) address return target, defaultPort, nil } @@ -425,7 +428,7 @@ func chosenByPercentage(a *int) bool { if a == nil { return true } - return rand.Intn(100)+1 <= *a + return rand.IntN(100)+1 <= *a } func canaryingSC(js string) string { diff --git a/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/vendor/google.golang.org/grpc/internal/transport/client_stream.go new file mode 100644 index 00000000000..8ed347c5419 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/client_stream.go @@ -0,0 +1,144 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package transport
+
+import (
+	"sync/atomic"
+
+	"golang.org/x/net/http2"
+	"google.golang.org/grpc/mem"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+// ClientStream implements streaming functionality for a gRPC client.
+type ClientStream struct {
+	*Stream // Embed for common stream functionality.
+
+	ct       *http2Client
+	done     chan struct{} // closed at the end of stream to unblock writers.
+	doneFunc func()        // invoked at the end of stream.
+
+	headerChan       chan struct{} // closed to indicate the end of header metadata.
+	headerChanClosed uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+	// headerValid indicates whether a valid header was received. Only
+	// meaningful after headerChan is closed (always call waitOnHeader() before
+	// reading its value).
+	headerValid bool
+	header      metadata.MD // the received header metadata
+	noHeaders   bool        // set if the client never received headers (set only after the stream is done).
+
+	bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream
+	unprocessed   atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream
+
+	status *status.Status // the status error received from the server
+}
+
+// Read reads an n byte message from the input stream.
+func (s *ClientStream) Read(n int) (mem.BufferSlice, error) {
+	b, err := s.Stream.read(n)
+	if err == nil {
+		s.ct.incrMsgRecv()
+	}
+	return b, err
+}
+
+// Close closes the stream and propagates err to any readers.
+func (s *ClientStream) Close(err error) {
+	var (
+		rst     bool
+		rstCode http2.ErrCode
+	)
+	if err != nil {
+		rst = true
+		rstCode = http2.ErrCodeCancel
+	}
+	s.ct.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
+}
+
+// Write writes the hdr and data bytes to the output stream.
+func (s *ClientStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error {
+	return s.ct.write(s, hdr, data, opts)
+}
+
+// BytesReceived indicates whether any bytes have been received on this stream.
+func (s *ClientStream) BytesReceived() bool {
+	return s.bytesReceived.Load()
+}
+
+// Unprocessed indicates whether the server did not process this stream --
+// i.e. it sent a refused stream or GOAWAY including this stream ID.
+func (s *ClientStream) Unprocessed() bool {
+	return s.unprocessed.Load()
+}
+
+func (s *ClientStream) waitOnHeader() {
+	select {
+	case <-s.ctx.Done():
+		// Close the stream to prevent headers/trailers from changing after
+		// this function returns.
+		s.Close(ContextErr(s.ctx.Err()))
+		// headerChan could possibly not be closed yet if closeStream raced
+		// with operateHeaders; wait until it is closed explicitly here.
+		<-s.headerChan
+	case <-s.headerChan:
+	}
+}
+
+// RecvCompress returns the compression algorithm applied to the inbound
+// message. It is an empty string if there is no compression applied.
+func (s *ClientStream) RecvCompress() string {
+	s.waitOnHeader()
+	return s.recvCompress
+}
+
+// Done returns a channel which is closed when it receives the final status
+// from the server.
+func (s *ClientStream) Done() <-chan struct{} {
+	return s.done
+}
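
waitOnHeader above is the blocking primitive behind several accessors: callers park on a select between the stream context and headerChan, and a context error closes the stream before returning so header state can no longer change underneath the caller. The shape of that primitive, reduced to the standard library (illustrative only):

    package main

    import (
        "context"
        "fmt"
    )

    // waitOnHeader blocks until headers arrive (channel closed) or the
    // stream context ends -- the same two cases the transport selects on.
    func waitOnHeader(ctx context.Context, headerChan <-chan struct{}) error {
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-headerChan:
            return nil
        }
    }

    func main() {
        ch := make(chan struct{})
        close(ch) // headers "arrived"
        fmt.Println(waitOnHeader(context.Background(), ch)) // <nil>
    }
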
+// Header returns the header metadata of the stream. Acquires the key-value
+// pairs of header metadata once it is available. It blocks until i) the
+// metadata is ready or ii) there is no header metadata or iii) the stream is
+// canceled/expired.
+func (s *ClientStream) Header() (metadata.MD, error) {
+	s.waitOnHeader()
+
+	if !s.headerValid || s.noHeaders {
+		return nil, s.status.Err()
+	}
+
+	return s.header.Copy(), nil
+}
+
+// TrailersOnly blocks until a header or trailers-only frame is received and
+// then returns true if the stream was trailers-only. If the stream ends
+// before headers are received, returns true.
+func (s *ClientStream) TrailersOnly() bool {
+	s.waitOnHeader()
+	return s.noHeaders
+}
+
+// Status returns the status received from the server.
+// Status can be read safely only after the stream has ended,
+// that is, after Done() is closed.
+func (s *ClientStream) Status() *status.Status {
+	return s.status
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
index 97198c51588..dfc0f224ec8 100644
--- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
+++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
@@ -92,14 +92,11 @@ func (f *trInFlow) newLimit(n uint32) uint32 {
 
 func (f *trInFlow) onData(n uint32) uint32 {
 	f.unacked += n
-	if f.unacked >= f.limit/4 {
-		w := f.unacked
-		f.unacked = 0
+	if f.unacked < f.limit/4 {
 		f.updateEffectiveWindowSize()
-		return w
+		return 0
 	}
-	f.updateEffectiveWindowSize()
-	return 0
+	return f.reset()
 }
 
 func (f *trInFlow) reset() uint32 {
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index ce878693bd7..3dea2357351 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -225,7 +225,7 @@ func (ht *serverHandlerTransport) do(fn func()) error {
 	}
 }
 
-func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error {
+func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status) error {
 	ht.writeStatusMu.Lock()
 	defer ht.writeStatusMu.Unlock()
 
@@ -289,14 +289,14 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
 
 // writePendingHeaders sets common and custom headers on the first
 // write call (Write, WriteHeader, or WriteStatus)
-func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) {
+func (ht *serverHandlerTransport) writePendingHeaders(s *ServerStream) {
 	ht.writeCommonHeaders(s)
 	ht.writeCustomHeaders(s)
 }
 
 // writeCommonHeaders sets common headers on the first write
 // call (Write, WriteHeader, or WriteStatus).
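
The trInFlow change in the flowcontrol.go hunk above inverts the branch but keeps the policy: received bytes are only acknowledged with a WINDOW_UPDATE once a quarter of the window has accumulated. A sketch of that threshold policy, under the assumption that reset() returns the unacked total and zeroes it:

    package main

    import "fmt"

    // inFlow mirrors the shape of trInFlow.onData: defer window updates
    // until unacked bytes reach limit/4, then acknowledge everything.
    type inFlow struct{ limit, unacked uint32 }

    func (f *inFlow) onData(n uint32) uint32 {
        f.unacked += n
        if f.unacked < f.limit/4 {
            return 0 // below threshold: no WINDOW_UPDATE yet
        }
        w := f.unacked
        f.unacked = 0
        return w // acknowledge all unacked bytes at once
    }

    func main() {
        f := &inFlow{limit: 65536}
        fmt.Println(f.onData(10000)) // 0: under the 16384-byte threshold
        fmt.Println(f.onData(10000)) // 20000: threshold crossed
    }
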
-func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { +func (ht *serverHandlerTransport) writeCommonHeaders(s *ServerStream) { h := ht.rw.Header() h["Date"] = nil // suppress Date to make tests happy; TODO: restore h.Set("Content-Type", ht.contentType) @@ -317,7 +317,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { // writeCustomHeaders sets custom headers set on the stream via SetHeader // on the first write call (Write, WriteHeader, or WriteStatus) -func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { +func (ht *serverHandlerTransport) writeCustomHeaders(s *ServerStream) { h := ht.rw.Header() s.hdrMu.Lock() @@ -333,7 +333,7 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { s.hdrMu.Unlock() } -func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { +func (ht *serverHandlerTransport) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error { // Always take a reference because otherwise there is no guarantee the data will // be available after this function returns. This is what callers to Write // expect. @@ -357,7 +357,7 @@ func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSl return nil } -func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { +func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) error { if err := s.SetHeader(md); err != nil { return err } @@ -385,7 +385,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { return err } -func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) { +func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) { // With this transport type there will be exactly 1 stream: this HTTP request. var cancel context.CancelFunc if ht.timeoutSet { @@ -408,16 +408,18 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream ctx = metadata.NewIncomingContext(ctx, ht.headerMD) req := ht.req - s := &Stream{ - id: 0, // irrelevant - ctx: ctx, - requestRead: func(int) {}, + s := &ServerStream{ + Stream: &Stream{ + id: 0, // irrelevant + ctx: ctx, + requestRead: func(int) {}, + buf: newRecvBuffer(), + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + }, cancel: cancel, - buf: newRecvBuffer(), st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), - contentSubtype: ht.contentSubtype, headerWireLength: 0, // won't have access to header wire length until golang/go#18997. 
} s.trReader = &transportReader{ @@ -471,9 +473,7 @@ func (ht *serverHandlerTransport) runStream() { } } -func (ht *serverHandlerTransport) IncrMsgSent() {} - -func (ht *serverHandlerTransport) IncrMsgRecv() {} +func (ht *serverHandlerTransport) incrMsgRecv() {} func (ht *serverHandlerTransport) Drain(string) { panic("Drain() is not implemented") @@ -498,5 +498,5 @@ func mapRecvMsgError(err error) error { if strings.Contains(err.Error(), "body closed by handler") { return status.Error(codes.Canceled, err.Error()) } - return connectionErrorf(true, err, err.Error()) + return connectionErrorf(true, err, "%s", err.Error()) } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 62b81885d8e..513dbb93d55 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -43,6 +43,7 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" + "google.golang.org/grpc/internal/proxyattributes" istatus "google.golang.org/grpc/internal/status" isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" @@ -123,7 +124,7 @@ type http2Client struct { mu sync.Mutex // guard the following variables nextID uint32 state transportState - activeStreams map[uint32]*Stream + activeStreams map[uint32]*ClientStream // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. prevGoAwayID uint32 // goAwayReason records the http2.ErrCode and debug data received with the @@ -153,7 +154,7 @@ type http2Client struct { logger *grpclog.PrefixLogger } -func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) { +func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, grpcUA string) (net.Conn, error) { address := addr.Addr networkType, ok := networktype.Get(addr) if fn != nil { @@ -177,8 +178,8 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error if !ok { networkType, address = parseDialTarget(address) } - if networkType == "tcp" && useProxy { - return proxyDial(ctx, address, grpcUA) + if opts, present := proxyattributes.Get(addr); present { + return proxyDial(ctx, addr, grpcUA, opts) } return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address) } @@ -199,10 +200,10 @@ func isTemporary(err error) bool { return true } -// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// NewHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { +func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ ClientTransport, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -217,7 +218,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts // address specific arbitrary data to reach custom dialers and credential handshakers. 
connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) - conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent) + conn, err := dial(connectCtx, opts.Dialer, addr, opts.UserAgent) if err != nil { if opts.FailOnNonTempDialError { return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) @@ -339,7 +340,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, - activeStreams: make(map[uint32]*Stream), + activeStreams: make(map[uint32]*ClientStream), isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, @@ -480,17 +481,19 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return t, nil } -func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream { // TODO(zhaoq): Handle uint32 overflow of Stream.id. - s := &Stream{ - ct: t, - done: make(chan struct{}), - method: callHdr.Method, - sendCompress: callHdr.SendCompress, - buf: newRecvBuffer(), - headerChan: make(chan struct{}), - contentSubtype: callHdr.ContentSubtype, - doneFunc: callHdr.DoneFunc, + s := &ClientStream{ + Stream: &Stream{ + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + contentSubtype: callHdr.ContentSubtype, + }, + ct: t, + done: make(chan struct{}), + headerChan: make(chan struct{}), + doneFunc: callHdr.DoneFunc, } s.wq = newWriteQuota(defaultWriteQuota, s.done) s.requestRead = func(n int) { @@ -506,7 +509,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { ctxDone: s.ctx.Done(), recv: s.buf, closeStream: func(err error) { - t.CloseStream(s, err) + s.Close(err) }, }, windowHandler: func(n int) { @@ -597,12 +600,6 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) for k, v := range callAuthData { headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } - if b := stats.OutgoingTags(ctx); b != nil { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) - } - if b := stats.OutgoingTrace(ctx); b != nil { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) - } if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { var k string @@ -738,7 +735,7 @@ func (e NewStreamError) Error() string { // NewStream creates a stream and registers it into the transport as "active" // streams. All non-nil errors returned will be *NewStreamError. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) { ctx = peer.NewContext(ctx, t.getPeer()) // ServerName field of the resolver returned address takes precedence over @@ -763,7 +760,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return } // The stream was unprocessed by the server. - atomic.StoreUint32(&s.unprocessed, 1) + s.unprocessed.Store(true) s.write(recvMsg{err: err}) close(s.done) // If headerChan isn't closed, then close it. 
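
The repeated s.unprocessed.Store(true) edits in these hunks are a mechanical migration from a uint32 flag written with atomic.StoreUint32 to Go 1.19's atomic.Bool, which carries the same memory-ordering guarantees with type safety. The pattern in isolation:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // stream stands in for ClientStream: unprocessed was previously a
    // uint32 set via atomic.StoreUint32(&s.unprocessed, 1).
    type stream struct {
        unprocessed atomic.Bool
    }

    func main() {
        s := &stream{}
        s.unprocessed.Store(true)
        fmt.Println(s.unprocessed.Load()) // true, no == 1 comparison needed
    }
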
@@ -908,21 +905,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return s, nil } -// CloseStream clears the footprint of a stream when the stream is not needed any more. -// This must not be executed in reader's goroutine. -func (t *http2Client) CloseStream(s *Stream, err error) { - var ( - rst bool - rstCode http2.ErrCode - ) - if err != nil { - rst = true - rstCode = http2.ErrCodeCancel - } - t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) -} - -func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { +func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { // Set stream status to done. if s.swapState(streamDone) == streamDone { // If it was already done, return. If multiple closeStream calls @@ -1085,7 +1068,7 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. -func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { +func (t *http2Client) write(s *ClientStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { reader := data.Reader() if opts.Last { @@ -1114,10 +1097,11 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *O _ = reader.Close() return err } + t.incrMsgSent() return nil } -func (t *http2Client) getStream(f http2.Frame) *Stream { +func (t *http2Client) getStream(f http2.Frame) *ClientStream { t.mu.Lock() s := t.activeStreams[f.Header().StreamID] t.mu.Unlock() @@ -1127,7 +1111,7 @@ func (t *http2Client) getStream(f http2.Frame) *Stream { // adjustWindow sends out extra window update over the initial window size // of stream if the application is requesting data larger in size than // the window. -func (t *http2Client) adjustWindow(s *Stream, n uint32) { +func (t *http2Client) adjustWindow(s *ClientStream, n uint32) { if w := s.fc.maybeAdjust(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -1136,7 +1120,7 @@ func (t *http2Client) adjustWindow(s *Stream, n uint32) { // updateWindow adjusts the inbound quota for the stream. // Window updates will be sent out when the cumulative quota // exceeds the corresponding threshold. -func (t *http2Client) updateWindow(s *Stream, n uint32) { +func (t *http2Client) updateWindow(s *ClientStream, n uint32) { if w := s.fc.onRead(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -1242,7 +1226,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } if f.ErrCode == http2.ErrCodeRefusedStream { // The stream was unprocessed by the server. - atomic.StoreUint32(&s.unprocessed, 1) + s.unprocessed.Store(true) } statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { @@ -1383,11 +1367,11 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error { return connectionErrorf(true, nil, "received goaway and there are no active streams") } - streamsToClose := make([]*Stream, 0) + streamsToClose := make([]*ClientStream, 0) for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. 
- atomic.StoreUint32(&stream.unprocessed, 1) + stream.unprocessed.Store(true) streamsToClose = append(streamsToClose, stream) } } @@ -1439,7 +1423,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } endStream := frame.StreamEnded() - atomic.StoreUint32(&s.bytesReceived, 1) + s.bytesReceived.Store(true) initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0 if !initialHeader && !endStream { @@ -1809,14 +1793,18 @@ func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics { func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } -func (t *http2Client) IncrMsgSent() { - t.channelz.SocketMetrics.MessagesSent.Add(1) - t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) +func (t *http2Client) incrMsgSent() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) + } } -func (t *http2Client) IncrMsgRecv() { - t.channelz.SocketMetrics.MessagesReceived.Add(1) - t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) +func (t *http2Client) incrMsgRecv() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) + } } func (t *http2Client) getOutFlowWindow() int64 { diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 584b50fe553..997b0a59b58 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -25,7 +25,7 @@ import ( "fmt" "io" "math" - "math/rand" + rand "math/rand/v2" "net" "net/http" "strconv" @@ -111,7 +111,7 @@ type http2Server struct { // already initialized since draining is already underway. drainEvent *grpcsync.Event state transportState - activeStreams map[uint32]*Stream + activeStreams map[uint32]*ServerStream // idle is the time instant when the connection went idle. // This is either the beginning of the connection or when the number of // RPCs go down to 0. @@ -256,7 +256,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, - activeStreams: make(map[uint32]*Stream), + activeStreams: make(map[uint32]*ServerStream), stats: config.StatsHandlers, kp: kp, idle: time.Now(), @@ -359,7 +359,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. 
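
The incrMsgSent/incrMsgRecv rewrites above also change behavior slightly: the channelz counters are now touched only when channelz is enabled, so the common path skips the atomic writes. The gating pattern, reduced to a standalone sketch (channelzOn stands in for channelz.IsOn()):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    var channelzOn atomic.Bool // stand-in for channelz.IsOn()

    type metrics struct{ messagesSent atomic.Int64 }

    func (m *metrics) incrMsgSent() {
        if !channelzOn.Load() {
            return // collection disabled: avoid the atomic add entirely
        }
        m.messagesSent.Add(1)
    }

    func main() {
        var m metrics
        m.incrMsgSent() // no-op while channelz is off
        channelzOn.Store(true)
        m.incrMsgSent()
        fmt.Println(m.messagesSent.Load()) // 1
    }
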
-func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { +func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*ServerStream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -385,11 +385,13 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade t.maxStreamID = streamID buf := newRecvBuffer() - s := &Stream{ - id: streamID, + s := &ServerStream{ + Stream: &Stream{ + id: streamID, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + }, st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, headerWireLength: int(frame.Header().Length), } var ( @@ -537,12 +539,6 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade // Attach the received metadata to the context. if len(mdata) > 0 { s.ctx = metadata.NewIncomingContext(s.ctx, mdata) - if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 { - s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1])) - } - if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 { - s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1])) - } } t.mu.Lock() if t.state != reachable { @@ -568,7 +564,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade t.logger.Infof("Aborting the stream early: %v", errMsg) } t.controlBuf.put(&earlyAbortStream{ - httpStatus: 405, + httpStatus: http.StatusMethodNotAllowed, streamID: streamID, contentSubtype: s.contentSubtype, status: status.New(codes.Internal, errMsg), @@ -589,7 +585,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade stat = status.New(codes.PermissionDenied, err.Error()) } t.controlBuf.put(&earlyAbortStream{ - httpStatus: 200, + httpStatus: http.StatusOK, streamID: s.id, contentSubtype: s.contentSubtype, status: stat, @@ -634,7 +630,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. -func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { +func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStream)) { defer func() { close(t.readerDone) <-t.loopyWriterDone @@ -698,7 +694,7 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { } } -func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { +func (t *http2Server) getStream(f http2.Frame) (*ServerStream, bool) { t.mu.Lock() defer t.mu.Unlock() if t.activeStreams == nil { @@ -716,7 +712,7 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { // adjustWindow sends out extra window update over the initial window size // of stream if the application is requesting data larger in size than // the window. -func (t *http2Server) adjustWindow(s *Stream, n uint32) { +func (t *http2Server) adjustWindow(s *ServerStream, n uint32) { if w := s.fc.maybeAdjust(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -726,7 +722,7 @@ func (t *http2Server) adjustWindow(s *Stream, n uint32) { // updateWindow adjusts the inbound quota for the stream and the transport. 
// Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. -func (t *http2Server) updateWindow(s *Stream, n uint32) { +func (t *http2Server) updateWindow(s *ServerStream, n uint32) { if w := s.fc.onRead(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w, @@ -963,7 +959,7 @@ func (t *http2Server) checkForHeaderListSize(it any) bool { return true } -func (t *http2Server) streamContextErr(s *Stream) error { +func (t *http2Server) streamContextErr(s *ServerStream) error { select { case <-t.done: return ErrConnClosing @@ -973,7 +969,7 @@ func (t *http2Server) streamContextErr(s *Stream) error { } // WriteHeader sends the header metadata md back to the client. -func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { +func (t *http2Server) writeHeader(s *ServerStream, md metadata.MD) error { s.hdrMu.Lock() defer s.hdrMu.Unlock() if s.getState() == streamDone { @@ -1006,7 +1002,7 @@ func (t *http2Server) setResetPingStrikes() { atomic.StoreUint32(&t.resetPingStrikes, 1) } -func (t *http2Server) writeHeaderLocked(s *Stream) error { +func (t *http2Server) writeHeaderLocked(s *ServerStream) error { // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. @@ -1046,7 +1042,7 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { // There is no further I/O operations being able to perform on this stream. // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. -func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { +func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error { s.hdrMu.Lock() defer s.hdrMu.Unlock() @@ -1117,11 +1113,11 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). -func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { +func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error { reader := data.Reader() if !s.isHeaderSent() { // Headers haven't been written yet. - if err := t.WriteHeader(s, nil); err != nil { + if err := t.writeHeader(s, nil); err != nil { _ = reader.Close() return err } @@ -1147,6 +1143,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Opti _ = reader.Close() return err } + t.incrMsgSent() return nil } @@ -1276,7 +1273,7 @@ func (t *http2Server) Close(err error) { } // deleteStream deletes the stream s from transport's active streams. -func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { +func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { @@ -1297,7 +1294,7 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { } // finishStream closes the stream and puts the trailing headerFrame into controlbuf. 
-func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
+func (t *http2Server) finishStream(s *ServerStream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
 	// In case stream sending and receiving are invoked in separate
 	// goroutines (e.g., bi-directional streaming), cancel needs to be
 	// called to interrupt the potential blocking on other goroutines.
@@ -1321,7 +1318,7 @@
 }
 
 // closeStream clears the footprint of a stream when the stream is not needed any more.
-func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
+func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
 	// In case stream sending and receiving are invoked in separate
 	// goroutines (e.g., bi-directional streaming), cancel needs to be
 	// called to interrupt the potential blocking on other goroutines.
@@ -1415,14 +1412,18 @@ func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics {
 	}
 }
 
-func (t *http2Server) IncrMsgSent() {
-	t.channelz.SocketMetrics.MessagesSent.Add(1)
-	t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1)
+func (t *http2Server) incrMsgSent() {
+	if channelz.IsOn() {
+		t.channelz.SocketMetrics.MessagesSent.Add(1)
+		t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1)
+	}
 }
 
-func (t *http2Server) IncrMsgRecv() {
-	t.channelz.SocketMetrics.MessagesReceived.Add(1)
-	t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1)
+func (t *http2Server) incrMsgRecv() {
+	if channelz.IsOn() {
+		t.channelz.SocketMetrics.MessagesReceived.Add(1)
+		t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1)
+	}
 }
 
 func (t *http2Server) getOutFlowWindow() int64 {
@@ -1455,7 +1456,7 @@ func getJitter(v time.Duration) time.Duration {
 	}
 	// Generate a jitter between +/- 10% of the value.
 	r := int64(v / 10)
-	j := rand.Int63n(2*r) - r
+	j := rand.Int64N(2*r) - r
 	return time.Duration(j)
 }
 
diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go
index 54b22443654..d7738459550 100644
--- a/vendor/google.golang.org/grpc/internal/transport/proxy.go
+++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go
@@ -30,34 +30,16 @@ import (
 	"net/url"
 
 	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/proxyattributes"
+	"google.golang.org/grpc/resolver"
 )
 
 const proxyAuthHeaderKey = "Proxy-Authorization"
 
-var (
-	// The following variable will be overwritten in the tests.
-	httpProxyFromEnvironment = http.ProxyFromEnvironment
-)
-
-func mapAddress(address string) (*url.URL, error) {
-	req := &http.Request{
-		URL: &url.URL{
-			Scheme: "https",
-			Host:   address,
-		},
-	}
-	url, err := httpProxyFromEnvironment(req)
-	if err != nil {
-		return nil, err
-	}
-	return url, nil
-}
-
 // To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
-// It's possible that this reader reads more than what's need for the response and stores
-// those bytes in the buffer.
-// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the
-// bytes in the buffer.
+// It's possible that this reader reads more than what's needed for the response
+// and stores those bytes in the buffer. bufConn wraps the original net.Conn
+// and the bufio.Reader to make sure we don't lose the bytes in the buffer.
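
bufConn, defined next, exists because http.ReadResponse may leave bytes sitting in its bufio.Reader; subsequent reads must drain that buffer before touching the underlying conn. A small standalone illustration of the buffered-leftover problem (an in-memory reader stands in for the network conn):

    package main

    import (
        "bufio"
        "fmt"
        "io"
        "net"
        "strings"
    )

    // bufferedConn mirrors bufConn: Read goes through the bufio.Reader so
    // bytes it buffered past the response are not lost.
    type bufferedConn struct {
        net.Conn
        r io.Reader
    }

    func (c *bufferedConn) Read(b []byte) (int, error) { return c.r.Read(b) }

    func main() {
        r := bufio.NewReader(strings.NewReader("HTTP/1.1 200 OK\r\n\r\npayload"))
        line, _ := r.ReadString('\n') // parsing consumed the status line...
        rest, _ := io.ReadAll(r)      // ...but later bytes are already buffered
        fmt.Printf("parsed %q, buffered leftover %q\n", line, rest)
    }
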
type bufConn struct { net.Conn r io.Reader @@ -72,7 +54,7 @@ func basicAuth(username, password string) string { return base64.StdEncoding.EncodeToString([]byte(auth)) } -func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) { +func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, grpcUA string, opts proxyattributes.Options) (_ net.Conn, err error) { defer func() { if err != nil { conn.Close() @@ -81,15 +63,14 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri req := &http.Request{ Method: http.MethodConnect, - URL: &url.URL{Host: backendAddr}, + URL: &url.URL{Host: opts.ConnectAddr}, Header: map[string][]string{"User-Agent": {grpcUA}}, } - if t := proxyURL.User; t != nil { - u := t.Username() - p, _ := t.Password() + if user := opts.User; user != nil { + u := user.Username() + p, _ := user.Password() req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p)) } - if err := sendHTTPRequest(ctx, req, conn); err != nil { return nil, fmt.Errorf("failed to write the HTTP request: %v", err) } @@ -117,28 +98,13 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri return conn, nil } -// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy -// is necessary, dials, does the HTTP CONNECT handshake, and returns the -// connection. -func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) { - newAddr := addr - proxyURL, err := mapAddress(addr) - if err != nil { - return nil, err - } - if proxyURL != nil { - newAddr = proxyURL.Host - } - - conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr) +// proxyDial establishes a TCP connection to the specified address and performs an HTTP CONNECT handshake. +func proxyDial(ctx context.Context, addr resolver.Address, grpcUA string, opts proxyattributes.Options) (net.Conn, error) { + conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", addr.Addr) if err != nil { return nil, err } - if proxyURL == nil { - // proxy is disabled if proxyURL is nil. - return conn, err - } - return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) + return doHTTPConnectHandshake(ctx, conn, grpcUA, opts) } func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { diff --git a/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/vendor/google.golang.org/grpc/internal/transport/server_stream.go new file mode 100644 index 00000000000..a22a9015149 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/server_stream.go @@ -0,0 +1,178 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "context" + "errors" + "strings" + "sync" + "sync/atomic" + + "google.golang.org/grpc/mem" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// ServerStream implements streaming functionality for a gRPC server. +type ServerStream struct { + *Stream // Embed for common stream functionality. + + st internalServerTransport + ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance) + cancel context.CancelFunc // invoked at the end of stream to cancel ctx. + + // Holds compressor names passed in grpc-accept-encoding metadata from the + // client. + clientAdvertisedCompressors string + headerWireLength int + + // hdrMu protects outgoing header and trailer metadata. + hdrMu sync.Mutex + header metadata.MD // the outgoing header metadata. Updated by WriteHeader. + headerSent atomic.Bool // atomically set when the headers are sent out. +} + +// Read reads an n byte message from the input stream. +func (s *ServerStream) Read(n int) (mem.BufferSlice, error) { + b, err := s.Stream.read(n) + if err == nil { + s.st.incrMsgRecv() + } + return b, err +} + +// SendHeader sends the header metadata for the given stream. +func (s *ServerStream) SendHeader(md metadata.MD) error { + return s.st.writeHeader(s, md) +} + +// Write writes the hdr and data bytes to the output stream. +func (s *ServerStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { + return s.st.write(s, hdr, data, opts) +} + +// WriteStatus sends the status of a stream to the client. WriteStatus is +// the final call made on a stream and always occurs. +func (s *ServerStream) WriteStatus(st *status.Status) error { + return s.st.writeStatus(s, st) +} + +// isHeaderSent indicates whether headers have been sent. +func (s *ServerStream) isHeaderSent() bool { + return s.headerSent.Load() +} + +// updateHeaderSent updates headerSent and returns true +// if it was already set. +func (s *ServerStream) updateHeaderSent() bool { + return s.headerSent.Swap(true) +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *ServerStream) RecvCompress() string { + return s.recvCompress +} + +// SendCompress returns the send compressor name. +func (s *ServerStream) SendCompress() string { + return s.sendCompress +} + +// ContentSubtype returns the content-subtype for a request. For example, a +// content-subtype of "proto" will result in a content-type of +// "application/grpc+proto". This will always be lowercase. See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +func (s *ServerStream) ContentSubtype() string { + return s.contentSubtype +} + +// SetSendCompress sets the compression algorithm to the stream. +func (s *ServerStream) SetSendCompress(name string) error { + if s.isHeaderSent() || s.getState() == streamDone { + return errors.New("transport: set send compressor called after headers sent or stream done") + } + + s.sendCompress = name + return nil +} + +// SetContext sets the context of the stream. This will be deleted once the +// stats handler callouts all move to gRPC layer. +func (s *ServerStream) SetContext(ctx context.Context) { + s.ctx = ctx +} + +// ClientAdvertisedCompressors returns the compressor names advertised by the +// client via grpc-accept-encoding header. 
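In the new ServerStream above, headerSent is an atomic.Bool rather than the old uint32-plus-atomic-helpers, and updateHeaderSent relies on Swap(true) reporting the previous value, so exactly one caller wins the right to actually write headers. A self-contained sketch of that once-only pattern (the stream type here is a stand-in):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type streamSketch struct {
	headerSent atomic.Bool
}

// updateHeaderSent marks headers as sent and reports whether they already
// were; Swap makes the check-and-set a single atomic step, so two racing
// callers can never both see "not sent yet".
func (s *streamSketch) updateHeaderSent() bool {
	return s.headerSent.Swap(true)
}

func main() {
	s := &streamSketch{}
	for i := 0; i < 2; i++ {
		if !s.updateHeaderSent() {
			fmt.Println("caller", i, "writes the headers")
		} else {
			fmt.Println("caller", i, "skips: headers already sent")
		}
	}
}
```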
+func (s *ServerStream) ClientAdvertisedCompressors() []string { + values := strings.Split(s.clientAdvertisedCompressors, ",") + for i, v := range values { + values[i] = strings.TrimSpace(v) + } + return values +} + +// Header returns the header metadata of the stream. It returns the out header +// after t.WriteHeader is called. It does not block and must not be called +// until after WriteHeader. +func (s *ServerStream) Header() (metadata.MD, error) { + // Return the header in stream. It will be the out + // header after t.WriteHeader is called. + return s.header.Copy(), nil +} + +// HeaderWireLength returns the size of the headers of the stream as received +// from the wire. +func (s *ServerStream) HeaderWireLength() int { + return s.headerWireLength +} + +// SetHeader sets the header metadata. This can be called multiple times. +// This should not be called in parallel to other data writes. +func (s *ServerStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.isHeaderSent() || s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.header = metadata.Join(s.header, md) + s.hdrMu.Unlock() + return nil +} + +// SetTrailer sets the trailer metadata which will be sent with the RPC status +// by the server. This can be called multiple times. +// This should not be called parallel to other data writes. +func (s *ServerStream) SetTrailer(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.trailer = metadata.Join(s.trailer, md) + s.hdrMu.Unlock() + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index e12cb0bc914..af4a4aeab14 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -27,7 +27,6 @@ import ( "fmt" "io" "net" - "strings" "sync" "sync/atomic" "time" @@ -39,7 +38,6 @@ import ( "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" - "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" @@ -133,7 +131,7 @@ type recvBufferReader struct { err error } -func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { +func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) { if r.err != nil { return 0, r.err } @@ -142,9 +140,9 @@ func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { return n, nil } if r.closeStream != nil { - n, r.err = r.readHeaderClient(header) + n, r.err = r.readMessageHeaderClient(header) } else { - n, r.err = r.readHeader(header) + n, r.err = r.readMessageHeader(header) } return n, r.err } @@ -174,12 +172,12 @@ func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { return buf, r.err } -func (r *recvBufferReader) readHeader(header []byte) (n int, err error) { +func (r *recvBufferReader) readMessageHeader(header []byte) (n int, err error) { select { case <-r.ctxDone: return 0, ContextErr(r.ctx.Err()) case m := <-r.recv.get(): - return r.readHeaderAdditional(m, header) + return r.readMessageHeaderAdditional(m, header) } } @@ -192,7 +190,7 @@ func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) { } } -func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { +func (r *recvBufferReader) 
readMessageHeaderClient(header []byte) (n int, err error) { // If the context is canceled, then closes the stream with nil metadata. // closeStream writes its error parameter to r.recv as a recvMsg. // r.readAdditional acts on that message and returns the necessary error. @@ -213,9 +211,9 @@ func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { // faster. r.closeStream(ContextErr(r.ctx.Err())) m := <-r.recv.get() - return r.readHeaderAdditional(m, header) + return r.readMessageHeaderAdditional(m, header) case m := <-r.recv.get(): - return r.readHeaderAdditional(m, header) + return r.readMessageHeaderAdditional(m, header) } } @@ -246,7 +244,7 @@ func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { } } -func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) { +func (r *recvBufferReader) readMessageHeaderAdditional(m recvMsg, header []byte) (n int, err error) { r.recv.load() if m.err != nil { if m.buffer != nil { @@ -288,14 +286,8 @@ const ( // Stream represents an RPC in the transport layer. type Stream struct { id uint32 - st ServerTransport // nil for client side Stream - ct ClientTransport // nil for server side Stream - ctx context.Context // the associated context of the stream - cancel context.CancelFunc // always nil for client side Stream - done chan struct{} // closed at the end of stream to unblock writers. On the client side. - doneFunc func() // invoked at the end of stream on client side. - ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance) - method string // the associated RPC method of the stream + ctx context.Context // the associated context of the stream + method string // the associated RPC method of the stream recvCompress string sendCompress string buf *recvBuffer @@ -303,58 +295,17 @@ type Stream struct { fc *inFlow wq *writeQuota - // Holds compressor names passed in grpc-accept-encoding metadata from the - // client. This is empty for the client side stream. - clientAdvertisedCompressors string // Callback to state application's intentions to read data. This // is used to adjust flow control, if needed. requestRead func(int) - headerChan chan struct{} // closed to indicate the end of header metadata. - headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. - // headerValid indicates whether a valid header was received. Only - // meaningful after headerChan is closed (always call waitOnHeader() before - // reading its value). Not valid on server side. - headerValid bool - headerWireLength int // Only set on server side. - - // hdrMu protects header and trailer metadata on the server-side. - hdrMu sync.Mutex - // On client side, header keeps the received header metadata. - // - // On server side, header keeps the header set by SetHeader(). The complete - // header will merged into this after t.WriteHeader() is called. - header metadata.MD - trailer metadata.MD // the key-value map of trailer metadata. - - noHeaders bool // set if the client never received headers (set only after the stream is done). - - // On the server-side, headerSent is atomically set to 1 when the headers are sent out. - headerSent uint32 - state streamState - // On client-side it is the status error received from the server. - // On server-side it is unused. 
- status *status.Status - - bytesReceived uint32 // indicates whether any bytes have been received on this stream - unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream - // contentSubtype is the content-subtype for requests. // this must be lowercase or the behavior is undefined. contentSubtype string -} - -// isHeaderSent is only valid on the server-side. -func (s *Stream) isHeaderSent() bool { - return atomic.LoadUint32(&s.headerSent) == 1 -} -// updateHeaderSent updates headerSent and returns true -// if it was already set. It is valid only on server-side. -func (s *Stream) updateHeaderSent() bool { - return atomic.SwapUint32(&s.headerSent, 1) == 1 + trailer metadata.MD // the key-value map of trailer metadata. } func (s *Stream) swapState(st streamState) streamState { @@ -369,110 +320,12 @@ func (s *Stream) getState() streamState { return streamState(atomic.LoadUint32((*uint32)(&s.state))) } -func (s *Stream) waitOnHeader() { - if s.headerChan == nil { - // On the server headerChan is always nil since a stream originates - // only after having received headers. - return - } - select { - case <-s.ctx.Done(): - // Close the stream to prevent headers/trailers from changing after - // this function returns. - s.ct.CloseStream(s, ContextErr(s.ctx.Err())) - // headerChan could possibly not be closed yet if closeStream raced - // with operateHeaders; wait until it is closed explicitly here. - <-s.headerChan - case <-s.headerChan: - } -} - -// RecvCompress returns the compression algorithm applied to the inbound -// message. It is empty string if there is no compression applied. -func (s *Stream) RecvCompress() string { - s.waitOnHeader() - return s.recvCompress -} - -// SetSendCompress sets the compression algorithm to the stream. -func (s *Stream) SetSendCompress(name string) error { - if s.isHeaderSent() || s.getState() == streamDone { - return errors.New("transport: set send compressor called after headers sent or stream done") - } - - s.sendCompress = name - return nil -} - -// SendCompress returns the send compressor name. -func (s *Stream) SendCompress() string { - return s.sendCompress -} - -// ClientAdvertisedCompressors returns the compressor names advertised by the -// client via grpc-accept-encoding header. -func (s *Stream) ClientAdvertisedCompressors() []string { - values := strings.Split(s.clientAdvertisedCompressors, ",") - for i, v := range values { - values[i] = strings.TrimSpace(v) - } - return values -} - -// Done returns a channel which is closed when it receives the final status -// from the server. -func (s *Stream) Done() <-chan struct{} { - return s.done -} - -// Header returns the header metadata of the stream. -// -// On client side, it acquires the key-value pairs of header metadata once it is -// available. It blocks until i) the metadata is ready or ii) there is no header -// metadata or iii) the stream is canceled/expired. -// -// On server side, it returns the out header after t.WriteHeader is called. It -// does not block and must not be called until after WriteHeader. -func (s *Stream) Header() (metadata.MD, error) { - if s.headerChan == nil { - // On server side, return the header in stream. It will be the out - // header after t.WriteHeader is called. 
- return s.header.Copy(), nil - } - s.waitOnHeader() - - if !s.headerValid || s.noHeaders { - return nil, s.status.Err() - } - - return s.header.Copy(), nil -} - -// TrailersOnly blocks until a header or trailers-only frame is received and -// then returns true if the stream was trailers-only. If the stream ends -// before headers are received, returns true, nil. Client-side only. -func (s *Stream) TrailersOnly() bool { - s.waitOnHeader() - return s.noHeaders -} - // Trailer returns the cached trailer metadata. Note that if it is not called -// after the entire stream is done, it could return an empty MD. Client -// side only. +// after the entire stream is done, it could return an empty MD. // It can be safely read only after stream has ended that is either read // or write have returned io.EOF. func (s *Stream) Trailer() metadata.MD { - c := s.trailer.Copy() - return c -} - -// ContentSubtype returns the content-subtype for a request. For example, a -// content-subtype of "proto" will result in a content-type of -// "application/grpc+proto". This will always be lowercase. See -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. -func (s *Stream) ContentSubtype() string { - return s.contentSubtype + return s.trailer.Copy() } // Context returns the context of the stream. @@ -480,90 +333,31 @@ func (s *Stream) Context() context.Context { return s.ctx } -// SetContext sets the context of the stream. This will be deleted once the -// stats handler callouts all move to gRPC layer. -func (s *Stream) SetContext(ctx context.Context) { - s.ctx = ctx -} - // Method returns the method for the stream. func (s *Stream) Method() string { return s.method } -// Status returns the status received from the server. -// Status can be read safely only after the stream has ended, -// that is, after Done() is closed. -func (s *Stream) Status() *status.Status { - return s.status -} - -// HeaderWireLength returns the size of the headers of the stream as received -// from the wire. Valid only on the server. -func (s *Stream) HeaderWireLength() int { - return s.headerWireLength -} - -// SetHeader sets the header metadata. This can be called multiple times. -// Server side only. -// This should not be called in parallel to other data writes. -func (s *Stream) SetHeader(md metadata.MD) error { - if md.Len() == 0 { - return nil - } - if s.isHeaderSent() || s.getState() == streamDone { - return ErrIllegalHeaderWrite - } - s.hdrMu.Lock() - s.header = metadata.Join(s.header, md) - s.hdrMu.Unlock() - return nil -} - -// SendHeader sends the given header metadata. The given metadata is -// combined with any metadata set by previous calls to SetHeader and -// then written to the transport stream. -func (s *Stream) SendHeader(md metadata.MD) error { - return s.st.WriteHeader(s, md) -} - -// SetTrailer sets the trailer metadata which will be sent with the RPC status -// by the server. This can be called multiple times. Server side only. -// This should not be called parallel to other data writes. -func (s *Stream) SetTrailer(md metadata.MD) error { - if md.Len() == 0 { - return nil - } - if s.getState() == streamDone { - return ErrIllegalHeaderWrite - } - s.hdrMu.Lock() - s.trailer = metadata.Join(s.trailer, md) - s.hdrMu.Unlock() - return nil -} - func (s *Stream) write(m recvMsg) { s.buf.put(m) } -// ReadHeader reads data into the provided header slice from the stream. 
It -// first checks if there was an error during a previous read operation and +// ReadMessageHeader reads data into the provided header slice from the stream. +// It first checks if there was an error during a previous read operation and // returns it if present. It then requests a read operation for the length of // the header. It continues to read from the stream until the entire header -// slice is filled or an error occurs. If an `io.EOF` error is encountered -// with partially read data, it is converted to `io.ErrUnexpectedEOF` to -// indicate an unexpected end of the stream. The method returns any error -// encountered during the read process or nil if the header was successfully -// read. -func (s *Stream) ReadHeader(header []byte) (err error) { +// slice is filled or an error occurs. If an `io.EOF` error is encountered with +// partially read data, it is converted to `io.ErrUnexpectedEOF` to indicate an +// unexpected end of the stream. The method returns any error encountered during +// the read process or nil if the header was successfully read. +func (s *Stream) ReadMessageHeader(header []byte) (err error) { // Don't request a read if there was an error earlier if er := s.trReader.er; er != nil { return er } s.requestRead(len(header)) for len(header) != 0 { - n, err := s.trReader.ReadHeader(header) + n, err := s.trReader.ReadMessageHeader(header) header = header[n:] if len(header) == 0 { err = nil @@ -579,7 +373,7 @@ func (s *Stream) ReadHeader(header []byte) (err error) { } // Read reads n bytes from the wire for this stream. -func (s *Stream) Read(n int) (data mem.BufferSlice, err error) { +func (s *Stream) read(n int) (data mem.BufferSlice, err error) { // Don't request a read if there was an error earlier if er := s.trReader.er; er != nil { return nil, er @@ -619,8 +413,8 @@ type transportReader struct { er error } -func (t *transportReader) ReadHeader(header []byte) (int, error) { - n, err := t.reader.ReadHeader(header) +func (t *transportReader) ReadMessageHeader(header []byte) (int, error) { + n, err := t.reader.ReadMessageHeader(header) if err != nil { t.er = err return 0, err @@ -639,17 +433,6 @@ func (t *transportReader) Read(n int) (mem.Buffer, error) { return buf, nil } -// BytesReceived indicates whether any bytes have been received on this stream. -func (s *Stream) BytesReceived() bool { - return atomic.LoadUint32(&s.bytesReceived) == 1 -} - -// Unprocessed indicates whether the server did not process this stream -- -// i.e. it sent a refused stream or GOAWAY including this stream ID. -func (s *Stream) Unprocessed() bool { - return atomic.LoadUint32(&s.unprocessed) == 1 -} - // GoString is implemented by Stream so context.String() won't // race when printing %#v. func (s *Stream) GoString() string { @@ -719,21 +502,13 @@ type ConnectOptions struct { ChannelzParent *channelz.SubChannel // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. MaxHeaderListSize *uint32 - // UseProxy specifies if a proxy should be used. - UseProxy bool // The mem.BufferPool to use when reading/writing to the wire. BufferPool mem.BufferPool } -// NewClientTransport establishes the transport with the required ConnectOptions -// and returns it to the caller. 
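The renamed ReadMessageHeader above keeps reading until the header slice is full and converts an io.EOF seen after a partial read into io.ErrUnexpectedEOF. The same loop shape, runnable in isolation — fillHeader is a hypothetical stand-in for the stream method, reading from a plain io.Reader:

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
)

// fillHeader reads until header is completely filled, mapping io.EOF on a
// partially filled header to io.ErrUnexpectedEOF, as ReadMessageHeader does.
func fillHeader(r io.Reader, header []byte) error {
	for len(header) != 0 {
		n, err := r.Read(header)
		header = header[n:]
		if len(header) == 0 {
			return nil
		}
		if err == io.EOF {
			return io.ErrUnexpectedEOF
		}
		if err != nil {
			return err
		}
	}
	return nil
}

func main() {
	hdr := make([]byte, 5)
	err := fillHeader(bytes.NewReader([]byte{1, 2, 3}), hdr)
	fmt.Println(errors.Is(err, io.ErrUnexpectedEOF)) // true: only 3 of 5 bytes arrived
}
```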
-func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) -} - -// Options provides additional hints and information for message +// WriteOptions provides additional hints and information for message // transmission. -type Options struct { +type WriteOptions struct { // Last indicates whether this write is the last piece for // this stream. Last bool @@ -782,18 +557,8 @@ type ClientTransport interface { // It does not block. GracefulClose() - // Write sends the data for the given stream. A nil stream indicates - // the write is to be performed on the transport as a whole. - Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error - // NewStream creates a Stream for an RPC. - NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) - - // CloseStream clears the footprint of a stream when the stream is - // not needed any more. The err indicates the error incurred when - // CloseStream is called. Must be called when a stream is finished - // unless the associated transport is closing. - CloseStream(stream *Stream, err error) + NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) // Error returns a channel that is closed when some I/O error // happens. Typically the caller should have a goroutine to monitor @@ -813,12 +578,6 @@ type ClientTransport interface { // RemoteAddr returns the remote network address. RemoteAddr() net.Addr - - // IncrMsgSent increments the number of message sent through this transport. - IncrMsgSent() - - // IncrMsgRecv increments the number of message received through this transport. - IncrMsgRecv() } // ServerTransport is the common interface for all gRPC server-side transport @@ -828,19 +587,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(context.Context, func(*Stream)) - - // WriteHeader sends the header metadata for the given stream. - // WriteHeader may not be called on all streams. - WriteHeader(s *Stream, md metadata.MD) error - - // Write sends the data for the given stream. - // Write may not be called on all streams. - Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error - - // WriteStatus sends the status of a stream to the client. WriteStatus is - // the final call made on a stream and always occurs. - WriteStatus(s *Stream, st *status.Status) error + HandleStreams(context.Context, func(*ServerStream)) // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their @@ -852,12 +599,14 @@ type ServerTransport interface { // Drain notifies the client this ServerTransport stops accepting new RPCs. Drain(debugData string) +} - // IncrMsgSent increments the number of message sent through this transport. - IncrMsgSent() - - // IncrMsgRecv increments the number of message received through this transport. - IncrMsgRecv() +type internalServerTransport interface { + ServerTransport + writeHeader(s *ServerStream, md metadata.MD) error + write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error + writeStatus(s *ServerStream, st *status.Status) error + incrMsgRecv() } // connectionErrorf creates an ConnectionError with the specified error description. 
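With the write methods removed from the exported ServerTransport, the new internalServerTransport above embeds it and re-adds them as unexported methods, so only code inside the transport package can drive the write path while external callers see the slimmer surface. A minimal sketch of that interface-narrowing pattern, with stand-in types and methods rather than the real ones:

```go
package main

import "fmt"

// Public-facing surface: the only methods other packages would see.
type serverTransportSketch interface {
	Drain(debugData string)
}

// Unexported extension: embeds the public interface and adds methods that
// only this package can name (and therefore call).
type internalTransportSketch interface {
	serverTransportSketch
	writeStatus(code int) error
}

type http2Sketch struct{}

func (t *http2Sketch) Drain(debugData string) { fmt.Println("drain:", debugData) }

func (t *http2Sketch) writeStatus(code int) error {
	fmt.Println("write status:", code)
	return nil
}

func main() {
	var st internalTransportSketch = &http2Sketch{}
	st.Drain("going away")
	_ = st.writeStatus(0)
}
```

The concrete transport satisfies both interfaces; handing callers the narrow one is what keeps writeHeader, write, and writeStatus package-private in the real code.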
diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go index 228e9c2f20f..65002e2cc85 100644 --- a/vendor/google.golang.org/grpc/mem/buffer_slice.go +++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -22,6 +22,11 @@ import ( "io" ) +const ( + // 32 KiB is what io.Copy uses. + readAllBufSize = 32 * 1024 +) + // BufferSlice offers a means to represent data that spans one or more Buffer // instances. A BufferSlice is meant to be immutable after creation, and methods // like Ref create and return copies of the slice. This is why all methods have @@ -219,8 +224,58 @@ func (w *writer) Write(p []byte) (n int, err error) { // NewWriter wraps the given BufferSlice and BufferPool to implement the // io.Writer interface. Every call to Write copies the contents of the given -// buffer into a new Buffer pulled from the given pool and the Buffer is added to -// the given BufferSlice. +// buffer into a new Buffer pulled from the given pool and the Buffer is +// added to the given BufferSlice. func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer { return &writer{buffers: buffers, pool: pool} } + +// ReadAll reads from r until an error or EOF and returns the data it read. +// A successful call returns err == nil, not err == EOF. Because ReadAll is +// defined to read from src until EOF, it does not treat an EOF from Read +// as an error to be reported. +// +// Important: A failed call returns a non-nil error and may also return +// partially read buffers. It is the responsibility of the caller to free the +// BufferSlice returned, or its memory will not be reused. +func ReadAll(r io.Reader, pool BufferPool) (BufferSlice, error) { + var result BufferSlice + if wt, ok := r.(io.WriterTo); ok { + // This is more optimal since wt knows the size of chunks it wants to + // write and, hence, we can allocate buffers of an optimal size to fit + // them. E.g. might be a single big chunk, and we wouldn't chop it + // into pieces. + w := NewWriter(&result, pool) + _, err := wt.WriteTo(w) + return result, err + } +nextBuffer: + for { + buf := pool.Get(readAllBufSize) + // We asked for 32KiB but may have been given a bigger buffer. + // Use all of it if that's the case. + *buf = (*buf)[:cap(*buf)] + usedCap := 0 + for { + n, err := r.Read((*buf)[usedCap:]) + usedCap += n + if err != nil { + if usedCap == 0 { + // Nothing in this buf, put it back + pool.Put(buf) + } else { + *buf = (*buf)[:usedCap] + result = append(result, NewBuffer(buf, pool)) + } + if err == io.EOF { + err = nil + } + return result, err + } + if len(*buf) == usedCap { + result = append(result, NewBuffer(buf, pool)) + continue nextBuffer + } + } + } +} diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index bdaa2130e48..a2d2a798d48 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -123,7 +123,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. 
if lastPickErr != nil { errStr = "latest balancer error: " + lastPickErr.Error() } else { - errStr = fmt.Sprintf("received context error while waiting for new LB policy update: %s", ctx.Err().Error()) + errStr = fmt.Sprintf("%v while waiting for connections to become ready", ctx.Err()) } switch ctx.Err() { case context.DeadlineExceeded: diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index e87a17f36a5..ee0ff969af4 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -62,7 +62,7 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { materializedData := data.Materialize() data.Free() - p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)} + p.encodedData = mem.BufferSlice{mem.SliceBuffer(materializedData)} // TODO: it should be possible to grab the bufferPool from the underlying // stream implementation with a type cast to its actual type (such as @@ -76,7 +76,7 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { if p.pf.isCompressed() { materializedCompData := compData.Materialize() compData.Free() - compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)} + compData = mem.BufferSlice{mem.SliceBuffer(materializedCompData)} } p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf) diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go index e1f58104d85..12f2c933730 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go @@ -21,7 +21,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.36.4 // protoc v5.27.1 // source: grpc/reflection/v1/reflection.proto @@ -32,6 +32,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -43,16 +44,13 @@ const ( // The message sent by the client when calling ServerReflectionInfo method. type ServerReflectionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` // To use reflection service, the client should set one of the following // fields in message_request. The server distinguishes requests by their // defined field and then handles them using corresponding methods. 
// - // Types that are assignable to MessageRequest: + // Types that are valid to be assigned to MessageRequest: // // *ServerReflectionRequest_FileByFilename // *ServerReflectionRequest_FileContainingSymbol @@ -60,15 +58,15 @@ type ServerReflectionRequest struct { // *ServerReflectionRequest_AllExtensionNumbersOfType // *ServerReflectionRequest_ListServices MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ServerReflectionRequest) Reset() { *x = ServerReflectionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerReflectionRequest) String() string { @@ -79,7 +77,7 @@ func (*ServerReflectionRequest) ProtoMessage() {} func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -101,44 +99,54 @@ func (x *ServerReflectionRequest) GetHost() string { return "" } -func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { - if m != nil { - return m.MessageRequest +func (x *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { + if x != nil { + return x.MessageRequest } return nil } func (x *ServerReflectionRequest) GetFileByFilename() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { - return x.FileByFilename + if x != nil { + if x, ok := x.MessageRequest.(*ServerReflectionRequest_FileByFilename); ok { + return x.FileByFilename + } } return "" } func (x *ServerReflectionRequest) GetFileContainingSymbol() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { - return x.FileContainingSymbol + if x != nil { + if x, ok := x.MessageRequest.(*ServerReflectionRequest_FileContainingSymbol); ok { + return x.FileContainingSymbol + } } return "" } func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { - return x.FileContainingExtension + if x != nil { + if x, ok := x.MessageRequest.(*ServerReflectionRequest_FileContainingExtension); ok { + return x.FileContainingExtension + } } return nil } func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { - return x.AllExtensionNumbersOfType + if x != nil { + if x, ok := x.MessageRequest.(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { + return x.AllExtensionNumbersOfType + } } return "" } func (x *ServerReflectionRequest) GetListServices() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { - return x.ListServices + if x != nil { + if x, ok := x.MessageRequest.(*ServerReflectionRequest_ListServices); ok { + return x.ListServices + } } return "" } @@ -197,22 +205,19 @@ func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRe // The type name and 
extension number sent by the client when requesting // file_containing_extension. type ExtensionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Fully-qualified type name. The format should be . ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ExtensionRequest) Reset() { *x = ExtensionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRequest) String() string { @@ -223,7 +228,7 @@ func (*ExtensionRequest) ProtoMessage() {} func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -254,31 +259,28 @@ func (x *ExtensionRequest) GetExtensionNumber() int32 { // The message sent by the server to answer ServerReflectionInfo method. type ServerReflectionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` // The server sets one of the following fields according to the message_request // in the request. 
// - // Types that are assignable to MessageResponse: + // Types that are valid to be assigned to MessageResponse: // // *ServerReflectionResponse_FileDescriptorResponse // *ServerReflectionResponse_AllExtensionNumbersResponse // *ServerReflectionResponse_ListServicesResponse // *ServerReflectionResponse_ErrorResponse MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ServerReflectionResponse) Reset() { *x = ServerReflectionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerReflectionResponse) String() string { @@ -289,7 +291,7 @@ func (*ServerReflectionResponse) ProtoMessage() {} func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -318,37 +320,45 @@ func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest return nil } -func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { - if m != nil { - return m.MessageResponse +func (x *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { + if x != nil { + return x.MessageResponse } return nil } func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { - return x.FileDescriptorResponse + if x != nil { + if x, ok := x.MessageResponse.(*ServerReflectionResponse_FileDescriptorResponse); ok { + return x.FileDescriptorResponse + } } return nil } func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { - return x.AllExtensionNumbersResponse + if x != nil { + if x, ok := x.MessageResponse.(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { + return x.AllExtensionNumbersResponse + } } return nil } func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { - return x.ListServicesResponse + if x != nil { + if x, ok := x.MessageResponse.(*ServerReflectionResponse_ListServicesResponse); ok { + return x.ListServicesResponse + } } return nil } func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { - return x.ErrorResponse + if x != nil { + if x, ok := x.MessageResponse.(*ServerReflectionResponse_ErrorResponse); ok { + return x.ErrorResponse + } } return nil } @@ -396,23 +406,20 @@ func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_Messag // a file_by_filename, file_containing_symbol, or file_containing_extension // request. 
type FileDescriptorResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Serialized FileDescriptorProto messages. We avoid taking a dependency on // descriptor.proto, which uses proto2 only features, by making them opaque // bytes instead. FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FileDescriptorResponse) Reset() { *x = FileDescriptorResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorResponse) String() string { @@ -423,7 +430,7 @@ func (*FileDescriptorResponse) ProtoMessage() {} func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -448,23 +455,20 @@ func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { // A list of extension numbers sent by the server answering // all_extension_numbers_of_type request. type ExtensionNumberResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Full name of the base type, including the package name. The format // is . BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ExtensionNumberResponse) Reset() { *x = ExtensionNumberResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionNumberResponse) String() string { @@ -475,7 +479,7 @@ func (*ExtensionNumberResponse) ProtoMessage() {} func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -506,22 +510,19 @@ func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { // A list of ServiceResponse sent by the server answering list_services request. type ListServiceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The information of each service may be expanded in the future, so we use // ServiceResponse message to encapsulate it. 
- Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` + Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ListServiceResponse) Reset() { *x = ListServiceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListServiceResponse) String() string { @@ -532,7 +533,7 @@ func (*ListServiceResponse) ProtoMessage() {} func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -557,22 +558,19 @@ func (x *ListServiceResponse) GetService() []*ServiceResponse { // The information of a single service used by ListServiceResponse to answer // list_services request. type ServiceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Full name of a registered service, including its package name. The format // is . - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ServiceResponse) Reset() { *x = ServiceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServiceResponse) String() string { @@ -583,7 +581,7 @@ func (*ServiceResponse) ProtoMessage() {} func (x *ServiceResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -607,22 +605,19 @@ func (x *ServiceResponse) GetName() string { // The error code and error message sent by the server when an error occurs. type ErrorResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // This field uses the error codes defined in grpc::StatusCode. 
- ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` - ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ErrorResponse) Reset() { *x = ErrorResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ErrorResponse) String() string { @@ -633,7 +628,7 @@ func (*ErrorResponse) ProtoMessage() {} func (x *ErrorResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -664,7 +659,7 @@ func (x *ErrorResponse) GetErrorMessage() string { var File_grpc_reflection_v1_reflection_proto protoreflect.FileDescriptor -var file_grpc_reflection_v1_reflection_proto_rawDesc = []byte{ +var file_grpc_reflection_v1_reflection_proto_rawDesc = string([]byte{ 0x0a, 0x23, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, @@ -774,16 +769,16 @@ var file_grpc_reflection_v1_reflection_proto_rawDesc = []byte{ 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_grpc_reflection_v1_reflection_proto_rawDescOnce sync.Once - file_grpc_reflection_v1_reflection_proto_rawDescData = file_grpc_reflection_v1_reflection_proto_rawDesc + file_grpc_reflection_v1_reflection_proto_rawDescData []byte ) func file_grpc_reflection_v1_reflection_proto_rawDescGZIP() []byte { file_grpc_reflection_v1_reflection_proto_rawDescOnce.Do(func() { - file_grpc_reflection_v1_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1_reflection_proto_rawDescData) + file_grpc_reflection_v1_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_reflection_v1_reflection_proto_rawDesc), len(file_grpc_reflection_v1_reflection_proto_rawDesc))) }) return file_grpc_reflection_v1_reflection_proto_rawDescData } @@ -821,104 +816,6 @@ func file_grpc_reflection_v1_reflection_proto_init() { if File_grpc_reflection_v1_reflection_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_reflection_v1_reflection_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*ServerReflectionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_grpc_reflection_v1_reflection_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ServerReflectionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionNumberResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*ListServiceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*ServiceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*ErrorResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_grpc_reflection_v1_reflection_proto_msgTypes[0].OneofWrappers = []any{ (*ServerReflectionRequest_FileByFilename)(nil), (*ServerReflectionRequest_FileContainingSymbol)(nil), @@ -936,7 +833,7 @@ func file_grpc_reflection_v1_reflection_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_reflection_v1_reflection_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_reflection_v1_reflection_proto_rawDesc), len(file_grpc_reflection_v1_reflection_proto_rawDesc)), NumEnums: 0, NumMessages: 8, NumExtensions: 0, @@ -947,7 +844,6 @@ func file_grpc_reflection_v1_reflection_proto_init() { MessageInfos: file_grpc_reflection_v1_reflection_proto_msgTypes, }.Build() File_grpc_reflection_v1_reflection_proto = out.File - file_grpc_reflection_v1_reflection_proto_rawDesc = nil file_grpc_reflection_v1_reflection_proto_goTypes = nil file_grpc_reflection_v1_reflection_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go index 0582e16af2b..fcf9c1aa0ad 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.36.4 // protoc v5.27.1 // grpc/reflection/v1alpha/reflection.proto is a deprecated file. 
@@ -29,6 +29,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -42,17 +43,14 @@ const ( // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ServerReflectionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` // To use reflection service, the client should set one of the following // fields in message_request. The server distinguishes requests by their // defined field and then handles them using corresponding methods. // - // Types that are assignable to MessageRequest: + // Types that are valid to be assigned to MessageRequest: // // *ServerReflectionRequest_FileByFilename // *ServerReflectionRequest_FileContainingSymbol @@ -60,15 +58,15 @@ type ServerReflectionRequest struct { // *ServerReflectionRequest_AllExtensionNumbersOfType // *ServerReflectionRequest_ListServices MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ServerReflectionRequest) Reset() { *x = ServerReflectionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerReflectionRequest) String() string { @@ -79,7 +77,7 @@ func (*ServerReflectionRequest) ProtoMessage() {} func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -102,49 +100,59 @@ func (x *ServerReflectionRequest) GetHost() string { return "" } -func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { - if m != nil { - return m.MessageRequest +func (x *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { + if x != nil { + return x.MessageRequest } return nil } // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetFileByFilename() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { - return x.FileByFilename + if x != nil { + if x, ok := x.MessageRequest.(*ServerReflectionRequest_FileByFilename); ok { + return x.FileByFilename + } } return "" } // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
func (x *ServerReflectionRequest) GetFileContainingSymbol() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { - return x.FileContainingSymbol + if x != nil { + if x, ok := x.MessageRequest.(*ServerReflectionRequest_FileContainingSymbol); ok { + return x.FileContainingSymbol + } } return "" } // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { - return x.FileContainingExtension + if x != nil { + if x, ok := x.MessageRequest.(*ServerReflectionRequest_FileContainingExtension); ok { + return x.FileContainingExtension + } } return nil } // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { - return x.AllExtensionNumbersOfType + if x != nil { + if x, ok := x.MessageRequest.(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { + return x.AllExtensionNumbersOfType + } } return "" } // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetListServices() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { - return x.ListServices + if x != nil { + if x, ok := x.MessageRequest.(*ServerReflectionRequest_ListServices); ok { + return x.ListServices + } } return "" } @@ -215,25 +223,22 @@ func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRe // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ExtensionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Fully-qualified type name. The format should be . // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ExtensionRequest) Reset() { *x = ExtensionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRequest) String() string { @@ -244,7 +249,7 @@ func (*ExtensionRequest) ProtoMessage() {} func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -279,10 +284,7 @@ func (x *ExtensionRequest) GetExtensionNumber() int32 { // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ServerReflectionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. @@ -290,22 +292,22 @@ type ServerReflectionResponse struct { // The server set one of the following fields according to the message_request // in the request. 
// - // Types that are assignable to MessageResponse: + // Types that are valid to be assigned to MessageResponse: // // *ServerReflectionResponse_FileDescriptorResponse // *ServerReflectionResponse_AllExtensionNumbersResponse // *ServerReflectionResponse_ListServicesResponse // *ServerReflectionResponse_ErrorResponse MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ServerReflectionResponse) Reset() { *x = ServerReflectionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerReflectionResponse) String() string { @@ -316,7 +318,7 @@ func (*ServerReflectionResponse) ProtoMessage() {} func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -347,41 +349,49 @@ func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest return nil } -func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { - if m != nil { - return m.MessageResponse +func (x *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { + if x != nil { + return x.MessageResponse } return nil } // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { - return x.FileDescriptorResponse + if x != nil { + if x, ok := x.MessageResponse.(*ServerReflectionResponse_FileDescriptorResponse); ok { + return x.FileDescriptorResponse + } } return nil } // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { - return x.AllExtensionNumbersResponse + if x != nil { + if x, ok := x.MessageResponse.(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { + return x.AllExtensionNumbersResponse + } } return nil } // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { - return x.ListServicesResponse + if x != nil { + if x, ok := x.MessageResponse.(*ServerReflectionResponse_ListServicesResponse); ok { + return x.ListServicesResponse + } } return nil } // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { - return x.ErrorResponse + if x != nil { + if x, ok := x.MessageResponse.(*ServerReflectionResponse_ErrorResponse); ok { + return x.ErrorResponse + } } return nil } @@ -439,25 +449,22 @@ func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_Messag // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type FileDescriptorResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Serialized FileDescriptorProto messages. We avoid taking a dependency on // descriptor.proto, which uses proto2 only features, by making them opaque // bytes instead. // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FileDescriptorResponse) Reset() { *x = FileDescriptorResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorResponse) String() string { @@ -468,7 +475,7 @@ func (*FileDescriptorResponse) ProtoMessage() {} func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -496,10 +503,7 @@ func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ExtensionNumberResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Full name of the base type, including the package name. The format // is . // @@ -507,15 +511,15 @@ type ExtensionNumberResponse struct { BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ExtensionNumberResponse) Reset() { *x = ExtensionNumberResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionNumberResponse) String() string { @@ -526,7 +530,7 @@ func (*ExtensionNumberResponse) ProtoMessage() {} func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -561,24 +565,21 @@ func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ListServiceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The information of each service may be expanded in the future, so we use // ServiceResponse message to encapsulate it. // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` + Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ListServiceResponse) Reset() { *x = ListServiceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListServiceResponse) String() string { @@ -589,7 +590,7 @@ func (*ListServiceResponse) ProtoMessage() {} func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -617,24 +618,21 @@ func (x *ListServiceResponse) GetService() []*ServiceResponse { // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ServiceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Full name of a registered service, including its package name. The format // is . // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ServiceResponse) Reset() { *x = ServiceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServiceResponse) String() string { @@ -645,7 +643,7 @@ func (*ServiceResponse) ProtoMessage() {} func (x *ServiceResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -672,25 +670,22 @@ func (x *ServiceResponse) GetName() string { // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ErrorResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // This field uses the error codes defined in grpc::StatusCode. // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ErrorResponse) Reset() { *x = ErrorResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ErrorResponse) String() string { @@ -701,7 +696,7 @@ func (*ErrorResponse) ProtoMessage() {} func (x *ErrorResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -734,7 +729,7 @@ func (x *ErrorResponse) GetErrorMessage() string { var File_grpc_reflection_v1alpha_reflection_proto protoreflect.FileDescriptor -var file_grpc_reflection_v1alpha_reflection_proto_rawDesc = []byte{ +var file_grpc_reflection_v1alpha_reflection_proto_rawDesc = string([]byte{ 0x0a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x67, 0x72, 0x70, 0x63, @@ -849,16 +844,16 @@ var file_grpc_reflection_v1alpha_reflection_proto_rawDesc = []byte{ 
0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xb8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce sync.Once - file_grpc_reflection_v1alpha_reflection_proto_rawDescData = file_grpc_reflection_v1alpha_reflection_proto_rawDesc + file_grpc_reflection_v1alpha_reflection_proto_rawDescData []byte ) func file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP() []byte { file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce.Do(func() { - file_grpc_reflection_v1alpha_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1alpha_reflection_proto_rawDescData) + file_grpc_reflection_v1alpha_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_reflection_v1alpha_reflection_proto_rawDesc), len(file_grpc_reflection_v1alpha_reflection_proto_rawDesc))) }) return file_grpc_reflection_v1alpha_reflection_proto_rawDescData } @@ -896,104 +891,6 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { if File_grpc_reflection_v1alpha_reflection_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*ServerReflectionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ServerReflectionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionNumberResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*ListServiceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*ServiceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*ErrorResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []any{ (*ServerReflectionRequest_FileByFilename)(nil), 
(*ServerReflectionRequest_FileContainingSymbol)(nil), @@ -1011,7 +908,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_reflection_v1alpha_reflection_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_reflection_v1alpha_reflection_proto_rawDesc), len(file_grpc_reflection_v1alpha_reflection_proto_rawDesc)), NumEnums: 0, NumMessages: 8, NumExtensions: 0, @@ -1022,7 +919,6 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { MessageInfos: file_grpc_reflection_v1alpha_reflection_proto_msgTypes, }.Build() File_grpc_reflection_v1alpha_reflection_proto = out.File - file_grpc_reflection_v1alpha_reflection_proto_rawDesc = nil file_grpc_reflection_v1alpha_reflection_proto_goTypes = nil file_grpc_reflection_v1alpha_reflection_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index ada5b9bb79b..975b499706a 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -18,6 +18,12 @@ package resolver +import ( + "encoding/base64" + "sort" + "strings" +) + type addressMapEntry struct { addr Address value any @@ -137,66 +143,61 @@ func (a *AddressMap) Values() []any { return ret } -type endpointNode struct { - addrs map[string]struct{} -} - -// Equal returns whether the unordered set of addrs are the same between the -// endpoint nodes. -func (en *endpointNode) Equal(en2 *endpointNode) bool { - if len(en.addrs) != len(en2.addrs) { - return false - } - for addr := range en.addrs { - if _, ok := en2.addrs[addr]; !ok { - return false - } - } - return true -} - -func toEndpointNode(endpoint Endpoint) endpointNode { - en := make(map[string]struct{}) - for _, addr := range endpoint.Addresses { - en[addr.Addr] = struct{}{} - } - return endpointNode{ - addrs: en, - } -} +type endpointMapKey string // EndpointMap is a map of endpoints to arbitrary values keyed on only the // unordered set of address strings within an endpoint. This map is not thread // safe, thus it is unsafe to access concurrently. Must be created via // NewEndpointMap; do not construct directly. type EndpointMap struct { - endpoints map[*endpointNode]any + endpoints map[endpointMapKey]endpointData +} + +type endpointData struct { + // decodedKey stores the original key to avoid decoding when iterating on + // EndpointMap keys. + decodedKey Endpoint + value any } // NewEndpointMap creates a new EndpointMap. func NewEndpointMap() *EndpointMap { return &EndpointMap{ - endpoints: make(map[*endpointNode]any), + endpoints: make(map[endpointMapKey]endpointData), + } +} + +// encodeEndpoint returns a string that uniquely identifies the unordered set of +// addresses within an endpoint. +func encodeEndpoint(e Endpoint) endpointMapKey { + addrs := make([]string, 0, len(e.Addresses)) + // base64 encoding the address strings restricts the characters present + // within the strings. This allows us to use a delimiter without the need of + // escape characters. + for _, addr := range e.Addresses { + addrs = append(addrs, base64.StdEncoding.EncodeToString([]byte(addr.Addr))) } + sort.Strings(addrs) + // " " should not appear in base64 encoded strings. + return endpointMapKey(strings.Join(addrs, " ")) } // Get returns the value for the address in the map, if present. 
func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) { - en := toEndpointNode(e) - if endpoint := em.find(en); endpoint != nil { - return em.endpoints[endpoint], true + val, found := em.endpoints[encodeEndpoint(e)] + if found { + return val.value, true } return nil, false } // Set updates or adds the value to the address in the map. func (em *EndpointMap) Set(e Endpoint, value any) { - en := toEndpointNode(e) - if endpoint := em.find(en); endpoint != nil { - em.endpoints[endpoint] = value - return + en := encodeEndpoint(e) + em.endpoints[en] = endpointData{ + decodedKey: Endpoint{Addresses: e.Addresses}, + value: value, } - em.endpoints[&en] = value } // Len returns the number of entries in the map. @@ -211,12 +212,8 @@ func (em *EndpointMap) Len() int { // used for EndpointMap accesses. func (em *EndpointMap) Keys() []Endpoint { ret := make([]Endpoint, 0, len(em.endpoints)) - for en := range em.endpoints { - var endpoint Endpoint - for addr := range en.addrs { - endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr}) - } - ret = append(ret, endpoint) + for _, en := range em.endpoints { + ret = append(ret, en.decodedKey) } return ret } @@ -225,27 +222,13 @@ func (em *EndpointMap) Keys() []Endpoint { func (em *EndpointMap) Values() []any { ret := make([]any, 0, len(em.endpoints)) for _, val := range em.endpoints { - ret = append(ret, val) + ret = append(ret, val.value) } return ret } -// find returns a pointer to the endpoint node in em if the endpoint node is -// already present. If not found, nil is returned. The comparisons are done on -// the unordered set of addresses within an endpoint. -func (em EndpointMap) find(e endpointNode) *endpointNode { - for endpoint := range em.endpoints { - if e.Equal(endpoint) { - return endpoint - } - } - return nil -} - // Delete removes the specified endpoint from the map. func (em *EndpointMap) Delete(e Endpoint) { - en := toEndpointNode(e) - if entry := em.find(en); entry != nil { - delete(em.endpoints, entry) - } + en := encodeEndpoint(e) + delete(em.endpoints, en) } diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 202854511b8..b84ef26d46d 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -22,6 +22,7 @@ package resolver import ( "context" + "errors" "fmt" "net" "net/url" @@ -29,6 +30,7 @@ import ( "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/internal" "google.golang.org/grpc/serviceconfig" ) @@ -174,6 +176,8 @@ type BuildOptions struct { // Authority is the effective authority of the clientconn for which the // resolver is built. Authority string + // MetricsRecorder is the metrics recorder to do recording. + MetricsRecorder stats.MetricsRecorder } // An Endpoint is one network endpoint, or server, which may have multiple @@ -237,8 +241,8 @@ type ClientConn interface { // UpdateState can be omitted. UpdateState(State) error // ReportError notifies the ClientConn that the Resolver encountered an - // error. The ClientConn will notify the load balancer and begin calling - // ResolveNow on the Resolver with exponential backoff. + // error. The ClientConn then forwards this error to the load balancing + // policy. ReportError(error) // NewAddress is called by resolver to notify ClientConn a new list // of resolved addresses. 
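The resolver/map.go hunks above replace `EndpointMap`'s pointer-keyed `endpointNode` map, which needed an O(n) linear `find` on every access, with ordinary O(1) map lookups: the unordered address set of an endpoint is collapsed into a canonical string key. A minimal standalone sketch of that keying scheme (`endpointKey` is a hypothetical helper for illustration; the real `encodeEndpoint` operates on `resolver.Endpoint` values):

```go
package main

import (
	"encoding/base64"
	"fmt"
	"sort"
	"strings"
)

// endpointKey builds an order-independent key for a set of address strings:
// base64-encoding each address guarantees the delimiter cannot occur inside
// an element, sorting makes the key insensitive to address order, and a
// space joins the sorted, encoded parts.
func endpointKey(addrs []string) string {
	enc := make([]string, 0, len(addrs))
	for _, a := range addrs {
		enc = append(enc, base64.StdEncoding.EncodeToString([]byte(a)))
	}
	sort.Strings(enc)
	return strings.Join(enc, " ")
}

func main() {
	k1 := endpointKey([]string{"10.0.0.1:443", "10.0.0.2:443"})
	k2 := endpointKey([]string{"10.0.0.2:443", "10.0.0.1:443"})
	fmt.Println(k1 == k2) // true: the key ignores address order
}
```

Storing the original `Endpoint` alongside the value (`endpointData.decodedKey`) then lets `Keys()` return the callers' endpoints without decoding the string key back.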
@@ -330,3 +334,20 @@ type AuthorityOverrider interface { // typically in line, and must keep it unchanged. OverrideAuthority(Target) string } + +// ValidateEndpoints validates endpoints from a petiole policy's perspective. +// Petiole policies should call this before calling into their children. See +// [gRPC A61](https://github.com/grpc/proposal/blob/master/A61-IPv4-IPv6-dualstack-backends.md) +// for details. +func ValidateEndpoints(endpoints []Endpoint) error { + if len(endpoints) == 0 { + return errors.New("endpoints list is empty") + } + + for _, endpoint := range endpoints { + for range endpoint.Addresses { + return nil + } + } + return errors.New("endpoints list contains no addresses") +} diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go index 23bb3fb2582..80e16a327cd 100644 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/resolver/delegatingresolver" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -76,9 +77,19 @@ func (ccr *ccResolverWrapper) start() error { CredsBundle: ccr.cc.dopts.copts.CredsBundle, Dialer: ccr.cc.dopts.copts.Dialer, Authority: ccr.cc.authority, + MetricsRecorder: ccr.cc.metricsRecorderList, } var err error - ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) + // The delegating resolver is used unless: + // - A custom dialer is provided via WithContextDialer dialoption or + // - Proxy usage is disabled through WithNoProxy dialoption. + // In these cases, the resolver is built based on the scheme of target, + // using the appropriate resolver builder. 
+ if ccr.cc.dopts.copts.Dialer != nil || !ccr.cc.dopts.useProxy { + ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) + } else { + ccr.resolver, err = delegatingresolver.New(ccr.cc.parsedTarget, ccr, opts, ccr.cc.resolverBuilder, ccr.cc.dopts.enableLocalDNSResolution) + } errCh <- err }) return <-errCh @@ -123,12 +134,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { return nil } if s.Endpoints == nil { - s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) - for _, a := range s.Addresses { - ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} - ep.Addresses[0].BalancerAttributes = nil - s.Endpoints = append(s.Endpoints, ep) - } + s.Endpoints = addressesToEndpoints(s.Addresses) } ccr.addChannelzTraceEvent(s) ccr.curState = s @@ -161,7 +167,11 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { ccr.cc.mu.Unlock() return } - s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig} + s := resolver.State{ + Addresses: addrs, + ServiceConfig: ccr.curState.ServiceConfig, + Endpoints: addressesToEndpoints(addrs), + } ccr.addChannelzTraceEvent(s) ccr.curState = s ccr.mu.Unlock() @@ -199,3 +209,13 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } channelz.Infof(logger, ccr.cc.channelz, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } + +func addressesToEndpoints(addrs []resolver.Address) []resolver.Endpoint { + endpoints := make([]resolver.Endpoint, 0, len(addrs)) + for _, a := range addrs { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + endpoints = append(endpoints, ep) + } + return endpoints +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index aba1ae3e678..ad20e9dff20 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -151,7 +151,7 @@ func (d *gzipDecompressor) Type() string { // callInfo contains all related configuration and information about an RPC. type callInfo struct { - compressorType string + compressorName string failFast bool maxReceiveMessageSize *int maxSendMessageSize *int @@ -222,7 +222,7 @@ type HeaderCallOption struct { func (o HeaderCallOption) before(*callInfo) error { return nil } func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) { - *o.HeaderAddr, _ = attempt.s.Header() + *o.HeaderAddr, _ = attempt.transportStream.Header() } // Trailer returns a CallOptions that retrieves the trailer metadata @@ -244,7 +244,7 @@ type TrailerCallOption struct { func (o TrailerCallOption) before(*callInfo) error { return nil } func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) { - *o.TrailerAddr = attempt.s.Trailer() + *o.TrailerAddr = attempt.transportStream.Trailer() } // Peer returns a CallOption that retrieves peer information for a unary RPC. 
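In the resolver_wrapper.go hunks above, both `UpdateState` and `NewAddress` now derive `Endpoints` from the address list through the new `addressesToEndpoints` helper: each address becomes its own single-address endpoint, with the address's `BalancerAttributes` promoted to the endpoint and cleared on the copied address. A sketch of that conversion against the vendored resolver API (`toEndpoints` is a renamed copy for illustration):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

// toEndpoints mirrors addressesToEndpoints: one Endpoint per Address, with
// the Address's BalancerAttributes hoisted onto the Endpoint and nilled out
// on the Endpoint's copy of the Address.
func toEndpoints(addrs []resolver.Address) []resolver.Endpoint {
	eps := make([]resolver.Endpoint, 0, len(addrs))
	for _, a := range addrs {
		ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes}
		ep.Addresses[0].BalancerAttributes = nil
		eps = append(eps, ep)
	}
	return eps
}

func main() {
	eps := toEndpoints([]resolver.Address{{Addr: "10.0.0.1:443"}, {Addr: "10.0.0.2:443"}})
	fmt.Println(len(eps), len(eps[0].Addresses)) // 2 1: one Endpoint per Address
}
```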
@@ -266,7 +266,7 @@ type PeerCallOption struct { func (o PeerCallOption) before(*callInfo) error { return nil } func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) { - if x, ok := peer.FromContext(attempt.s.Context()); ok { + if x, ok := peer.FromContext(attempt.transportStream.Context()); ok { *o.PeerAddr = *x } } @@ -435,7 +435,7 @@ type CompressorCallOption struct { } func (o CompressorCallOption) before(c *callInfo) error { - c.compressorType = o.CompressorType + c.compressorName = o.CompressorType return nil } func (o CompressorCallOption) after(*callInfo, *csAttempt) {} @@ -622,7 +622,7 @@ func (pf payloadFormat) isCompressed() bool { } type streamReader interface { - ReadHeader(header []byte) error + ReadMessageHeader(header []byte) error Read(n int) (mem.BufferSlice, error) } @@ -656,7 +656,7 @@ type parser struct { // that the underlying streamReader must not return an incompatible // error. func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) { - err := p.r.ReadHeader(p.header[:]) + err := p.r.ReadMessageHeader(p.header[:]) if err != nil { return 0, nil, err } @@ -664,9 +664,6 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSl pf := payloadFormat(p.header[0]) length := binary.BigEndian.Uint32(p.header[1:]) - if length == 0 { - return pf, nil, nil - } if int64(length) > int64(maxInt) { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) } @@ -695,9 +692,9 @@ func encode(c baseCodec, msg any) (mem.BufferSlice, error) { if err != nil { return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) } - if uint(b.Len()) > math.MaxUint32 { + if bufSize := uint(b.Len()); bufSize > math.MaxUint32 { b.Free() - return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) + return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", bufSize) } return b, nil } @@ -817,7 +814,7 @@ func (p *payloadInfo) free() { // the buffer is no longer needed. // TODO: Refactor this function to reduce the number of arguments. // See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, +func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, ) (out mem.BufferSlice, err error) { pf, compressed, err := p.recvMsg(maxReceiveMessageSize) if err != nil { @@ -831,30 +828,13 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei return nil, st.Err() } - var size int if pf.isCompressed() { defer compressed.Free() - // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. 
- if dc != nil { - var uncompressedBuf []byte - uncompressedBuf, err = dc.Do(compressed.Reader()) - if err == nil { - out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)} - } - size = len(uncompressedBuf) - } else { - out, size, err = decompress(compressor, compressed, maxReceiveMessageSize, p.bufferPool) - } + out, err = decompress(compressor, compressed, dc, maxReceiveMessageSize, p.bufferPool) if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) - } - if size > maxReceiveMessageSize { - out.Free() - // TODO: Revisit the error code. Currently keep it consistent with java - // implementation. - return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + return nil, err } } else { out = compressed @@ -869,49 +849,56 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei return out, nil } -// Using compressor, decompress d, returning data and size. -// Optionally, if data will be over maxReceiveMessageSize, just return the size. -func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) { - dcReader, err := compressor.Decompress(d.Reader()) - if err != nil { - return nil, 0, err +// decompress processes the given data by decompressing it using either a custom decompressor or a standard compressor. +// If a custom decompressor is provided, it takes precedence. The function validates that the decompressed data +// does not exceed the specified maximum size and returns an error if this limit is exceeded. +// On success, it returns the decompressed data. Otherwise, it returns an error if decompression fails or the data exceeds the size limit. +func decompress(compressor encoding.Compressor, d mem.BufferSlice, dc Decompressor, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, error) { + if dc != nil { + uncompressed, err := dc.Do(d.Reader()) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + } + if len(uncompressed) > maxReceiveMessageSize { + return nil, status.Errorf(codes.ResourceExhausted, "grpc: message after decompression larger than max (%d vs. %d)", len(uncompressed), maxReceiveMessageSize) + } + return mem.BufferSlice{mem.SliceBuffer(uncompressed)}, nil } + if compressor != nil { + dcReader, err := compressor.Decompress(d.Reader()) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the message: %v", err) + } - // TODO: Can/should this still be preserved with the new BufferSlice API? Are - // there any actual benefits to allocating a single large buffer instead of - // multiple smaller ones? - //if sizer, ok := compressor.(interface { - // DecompressedSize(compressedBytes []byte) int - //}); ok { - // if size := sizer.DecompressedSize(d); size >= 0 { - // if size > maxReceiveMessageSize { - // return nil, size, nil - // } - // // size is used as an estimate to size the buffer, but we - // // will read more data if available. - // // +MinRead so ReadFrom will not reallocate if size is correct. - // // - // // TODO: If we ensure that the buffer size is the same as the DecompressedSize, - // // we can also utilize the recv buffer pool here. 
- // buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) - // bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - // return buf.Bytes(), int(bytesRead), err - // } - //} + // Read at most one byte more than the limit from the decompressor. + // Unless the limit is MaxInt64, in which case, that's impossible, so + // apply no limit. + if limit := int64(maxReceiveMessageSize); limit < math.MaxInt64 { + dcReader = io.LimitReader(dcReader, limit+1) + } + out, err := mem.ReadAll(dcReader, pool) + if err != nil { + out.Free() + return nil, status.Errorf(codes.Internal, "grpc: failed to read decompressed data: %v", err) + } - var out mem.BufferSlice - _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - if err != nil { - out.Free() - return nil, 0, err + if out.Len() > maxReceiveMessageSize { + out.Free() + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max %d", maxReceiveMessageSize) + } + return out, nil } - return out, out.Len(), nil + return nil, status.Errorf(codes.Internal, "grpc: no decompressor available for compressed payload") +} + +type recvCompressor interface { + RecvCompress() string } // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { +func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer) if err != nil { return err diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index d1e1415a40f..976e70ae068 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -37,12 +37,14 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" + estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" + istats "google.golang.org/grpc/internal/stats" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/mem" @@ -82,17 +84,21 @@ func init() { internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption internal.BufferPool = bufferPool + internal.MetricsRecorderForServer = func(srv *Server) estats.MetricsRecorder { + return istats.NewMetricsRecorderList(srv.opts.statsHandlers) + } } var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") -type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) +// MethodHandler is a function type that processes a unary RPC method call. +type MethodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) // MethodDesc represents an RPC service's method specification. 
type MethodDesc struct { MethodName string - Handler methodHandler + Handler MethodHandler } // ServiceDesc represents an RPC service's specification. @@ -621,8 +627,8 @@ func bufferPool(bufferPool mem.BufferPool) ServerOption { // workload (assuming a QPS of a few thousand requests/sec). const serverWorkerResetThreshold = 1 << 16 -// serverWorker blocks on a *transport.Stream channel forever and waits for -// data to be fed by serveStreams. This allows multiple requests to be +// serverWorker blocks on a *transport.ServerStream channel forever and waits +// for data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). // @@ -642,7 +648,7 @@ func (s *Server) serverWorker() { // connections to reduce the time spent overall on runtime.morestack. func (s *Server) initServerWorkers() { s.serverWorkerChannel = make(chan func()) - s.serverWorkerChannelClose = grpcsync.OnceFunc(func() { + s.serverWorkerChannelClose = sync.OnceFunc(func() { close(s.serverWorkerChannel) }) for i := uint32(0); i < s.opts.numServerWorkers; i++ { @@ -1020,7 +1026,7 @@ func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, }() streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) - st.HandleStreams(ctx, func(stream *transport.Stream) { + st.HandleStreams(ctx, func(stream *transport.ServerStream) { s.handlersWG.Add(1) streamQuota.acquire() f := func() { @@ -1136,7 +1142,7 @@ func (s *Server) incrCallsFailed() { s.channelz.ServerMetrics.CallsFailed.Add(1) } -func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStream, msg any, cp Compressor, opts *transport.WriteOptions, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) @@ -1165,7 +1171,7 @@ func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, if payloadLen > s.opts.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", payloadLen, s.opts.maxSendMessageSize) } - err = t.Write(stream, hdr, payload, opts) + err = stream.Write(hdr, payload, opts) if err == nil { if len(s.opts.statsHandlers) != 0 { for _, sh := range s.opts.statsHandlers { @@ -1212,7 +1218,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info } } -func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { +func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { shs := s.opts.statsHandlers if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { @@ -1320,7 +1326,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor decomp = encoding.GetCompressor(rc) if decomp == nil { st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) - t.WriteStatus(stream, st) + stream.WriteStatus(st) return st.Err() } } @@ -1354,16 +1360,21 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true) if err != nil { - if e := t.WriteStatus(stream, status.Convert(err)); e != nil { + if e := stream.WriteStatus(status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } - defer d.Free() - if channelz.IsOn() { - t.IncrMsgRecv() + freed := false + dataFree := func() { + if !freed { + d.Free() + freed = true + } } + defer dataFree() df := func(v any) error { + defer dataFree() if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } @@ -1404,7 +1415,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor trInfo.tr.LazyLog(stringer(appStatus.Message()), true) trInfo.tr.SetError() } - if e := t.WriteStatus(stream, appStatus); e != nil { + if e := stream.WriteStatus(appStatus); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } if len(binlogs) != 0 { @@ -1431,20 +1442,20 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor if trInfo != nil { trInfo.tr.LazyLog(stringer("OK"), false) } - opts := &transport.Options{Last: true} + opts := &transport.WriteOptions{Last: true} // Server handler could have set new compressor by calling SetSendCompressor. // In case it is set, we need to use it for compressing outbound message. if stream.SendCompress() != sendCompressorName { comp = encoding.GetCompressor(stream.SendCompress()) } - if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { + if err := s.sendResponse(ctx, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). 
return err } if sts, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, sts); e != nil { + if e := stream.WriteStatus(sts); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } } else { @@ -1484,9 +1495,6 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor binlog.Log(ctx, sm) } } - if channelz.IsOn() { - t.IncrMsgSent() - } if trInfo != nil { trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) } @@ -1502,7 +1510,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor binlog.Log(ctx, st) } } - return t.WriteStatus(stream, statusOK) + return stream.WriteStatus(statusOK) } // chainStreamServerInterceptors chains all stream server interceptors into one. @@ -1541,7 +1549,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf } } -func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { +func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } @@ -1561,7 +1569,6 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, - t: t, s: stream, p: &parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), @@ -1643,12 +1650,12 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran // If dc is set and matches the stream's compression, use it. Otherwise, try // to find a matching registered compressor for decomp. if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { - ss.dc = s.opts.dc + ss.decompressorV0 = s.opts.dc } else if rc != "" && rc != encoding.Identity { - ss.decomp = encoding.GetCompressor(rc) - if ss.decomp == nil { + ss.decompressorV1 = encoding.GetCompressor(rc) + if ss.decompressorV1 == nil { st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) - t.WriteStatus(ss.s, st) + ss.s.WriteStatus(st) return st.Err() } } @@ -1658,12 +1665,12 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran // // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. if s.opts.cp != nil { - ss.cp = s.opts.cp + ss.compressorV0 = s.opts.cp ss.sendCompressorName = s.opts.cp.Type() } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { // Legacy compressor not specified; attempt to respond with same encoding. 
- ss.comp = encoding.GetCompressor(rc) - if ss.comp != nil { + ss.compressorV1 = encoding.GetCompressor(rc) + if ss.compressorV1 != nil { ss.sendCompressorName = rc } } @@ -1674,7 +1681,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran } } - ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp) + ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.compressorV0, ss.compressorV1) if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) @@ -1717,7 +1724,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran binlog.Log(ctx, st) } } - t.WriteStatus(ss.s, appStatus) + ss.s.WriteStatus(appStatus) // TODO: Should we log an error from WriteStatus here and below? return appErr } @@ -1735,10 +1742,10 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran binlog.Log(ctx, st) } } - return t.WriteStatus(ss.s, statusOK) + return ss.s.WriteStatus(statusOK) } -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.ServerStream) { ctx := stream.Context() ctx = contextWithServer(ctx, s) var ti *traceInfo @@ -1768,7 +1775,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str ti.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) - if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil { if ti != nil { ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() @@ -1783,17 +1790,20 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str service := sm[:pos] method := sm[pos+1:] - md, _ := metadata.FromIncomingContext(ctx) - for _, sh := range s.opts.statsHandlers { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) - sh.HandleRPC(ctx, &stats.InHeader{ - FullMethod: stream.Method(), - RemoteAddr: t.Peer().Addr, - LocalAddr: t.Peer().LocalAddr, - Compression: stream.RecvCompress(), - WireLength: stream.HeaderWireLength(), - Header: md, - }) + // FromIncomingContext is expensive: skip if there are no statsHandlers + if len(s.opts.statsHandlers) > 0 { + md, _ := metadata.FromIncomingContext(ctx) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + sh.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) + } } // To have calls in stream callouts work. Will delete once all stats handler // calls come from the gRPC layer. @@ -1802,17 +1812,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { - s.processUnaryRPC(ctx, t, stream, srv, md, ti) + s.processUnaryRPC(ctx, stream, srv, md, ti) return } if sd, ok := srv.streams[method]; ok { - s.processStreamingRPC(ctx, t, stream, srv, sd, ti) + s.processStreamingRPC(ctx, stream, srv, sd, ti) return } } // Unknown service, or known server unknown method. 
if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) + s.processStreamingRPC(ctx, stream, nil, unknownDesc, ti) return } var errDesc string @@ -1825,7 +1835,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str ti.tr.LazyPrintf("%s", errDesc) ti.tr.SetError() } - if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil { if ti != nil { ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() @@ -1925,7 +1935,7 @@ func (s *Server) stop(graceful bool) { s.conns = nil if s.opts.numServerWorkers > 0 { - // Closing the channel (only once, via grpcsync.OnceFunc) after all the + // Closing the channel (only once, via sync.OnceFunc) after all the // connections have been closed above ensures that there are no // goroutines executing the callback passed to st.HandleStreams (where // the channel is written to). @@ -2100,7 +2110,7 @@ func SendHeader(ctx context.Context, md metadata.MD) error { // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. func SetSendCompressor(ctx context.Context, name string) error { - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream) if !ok || stream == nil { return fmt.Errorf("failed to fetch the stream from the given context") } @@ -2122,7 +2132,7 @@ func SetSendCompressor(ctx context.Context, name string) error { // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. func ClientSupportedCompressors(ctx context.Context) ([]string, error) { - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream) if !ok || stream == nil { return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) } diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 2671c5ef69f..8d451e07c7c 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -168,6 +168,7 @@ func init() { return parseServiceConfig(js, defaultMaxCallAttempts) } } + func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { if len(js) == 0 { return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} @@ -267,18 +268,21 @@ func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { return &serviceconfig.ParseResult{Config: &sc} } +func isValidRetryPolicy(jrp *jsonRetryPolicy) bool { + return jrp.MaxAttempts > 1 && + jrp.InitialBackoff > 0 && + jrp.MaxBackoff > 0 && + jrp.BackoffMultiplier > 0 && + len(jrp.RetryableStatusCodes) > 0 +} + func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) { if jrp == nil { return nil, nil } - if jrp.MaxAttempts <= 1 || - jrp.InitialBackoff <= 0 || - jrp.MaxBackoff <= 0 || - jrp.BackoffMultiplier <= 0 || - len(jrp.RetryableStatusCodes) == 0 { - logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) - return nil, nil + if !isValidRetryPolicy(jrp) { + return nil, fmt.Errorf("invalid retry policy (%+v): ", jrp) } if jrp.MaxAttempts < maxAttempts { @@ -297,7 +301,7 @@ func convertRetryPolicy(jrp 
*jsonRetryPolicy, maxAttempts int) (p *internalservi return rp, nil } -func min(a, b *int) *int { +func minPointers(a, b *int) *int { if *a < *b { return a } @@ -309,7 +313,7 @@ func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { return &defaultVal } if mcMax != nil && doptMax != nil { - return min(mcMax, doptMax) + return minPointers(mcMax, doptMax) } if mcMax != nil { return mcMax diff --git a/vendor/google.golang.org/grpc/stats/metrics.go b/vendor/google.golang.org/grpc/stats/metrics.go new file mode 100644 index 00000000000..641c8e9794a --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/metrics.go @@ -0,0 +1,81 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package stats + +import "maps" + +// MetricSet is a set of metrics to record. Once created, MetricSet is immutable, +// however Add and Remove can make copies with specific metrics added or +// removed, respectively. +// +// Do not construct directly; use NewMetricSet instead. +type MetricSet struct { + // metrics are the set of metrics to initialize. + metrics map[string]bool +} + +// NewMetricSet returns a MetricSet containing metricNames. +func NewMetricSet(metricNames ...string) *MetricSet { + newMetrics := make(map[string]bool) + for _, metric := range metricNames { + newMetrics[metric] = true + } + return &MetricSet{metrics: newMetrics} +} + +// Metrics returns the metrics set. The returned map is read-only and must not +// be modified. +func (m *MetricSet) Metrics() map[string]bool { + return m.metrics +} + +// Add adds the metricNames to the metrics set and returns a new copy with the +// additional metrics. +func (m *MetricSet) Add(metricNames ...string) *MetricSet { + newMetrics := make(map[string]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metricNames { + newMetrics[metric] = true + } + return &MetricSet{metrics: newMetrics} +} + +// Join joins the metrics passed in with the metrics set, and returns a new copy +// with the merged metrics. +func (m *MetricSet) Join(metrics *MetricSet) *MetricSet { + newMetrics := make(map[string]bool) + maps.Copy(newMetrics, m.metrics) + maps.Copy(newMetrics, metrics.metrics) + return &MetricSet{metrics: newMetrics} +} + +// Remove removes the metricNames from the metrics set and returns a new copy +// with the metrics removed. 
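The new stats.MetricSet above is copy-on-write: Add, Join, and Remove (whose body follows just below) each return a fresh set and never mutate the receiver. A short usage sketch; the metric names are illustrative only:

package main

import (
	"fmt"

	"google.golang.org/grpc/stats"
)

func main() {
	base := stats.NewMetricSet("grpc.client.attempt.duration")
	// Every operation copies; base itself is unchanged afterwards.
	merged := base.Add("grpc.client.call.duration").
		Join(stats.NewMetricSet("grpc.server.call.duration"))
	trimmed := merged.Remove("grpc.client.attempt.duration")
	for name := range trimmed.Metrics() { // read-only view; must not be modified
		fmt.Println(name)
	}
}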
+func (m *MetricSet) Remove(metricNames ...string) *MetricSet { + newMetrics := make(map[string]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metricNames { + delete(newMetrics, metric) + } + return &MetricSet{metrics: newMetrics} +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 71195c4943d..6f20d2d5486 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -260,84 +260,42 @@ func (s *ConnEnd) IsClient() bool { return s.Client } func (s *ConnEnd) isConnStats() {} -type incomingTagsKey struct{} -type outgoingTagsKey struct{} - // SetTags attaches stats tagging data to the context, which will be sent in // the outgoing RPC with the header grpc-tags-bin. Subsequent calls to // SetTags will overwrite the values from earlier calls. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: set the `grpc-tags-bin` header in the metadata instead. func SetTags(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, outgoingTagsKey{}, b) + return metadata.AppendToOutgoingContext(ctx, "grpc-tags-bin", string(b)) } // Tags returns the tags from the context for the inbound RPC. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: obtain the `grpc-tags-bin` header from metadata instead. func Tags(ctx context.Context) []byte { - b, _ := ctx.Value(incomingTagsKey{}).([]byte) - return b -} - -// SetIncomingTags attaches stats tagging data to the context, to be read by -// the application (not sent in outgoing RPCs). -// -// This is intended for gRPC-internal use ONLY. -func SetIncomingTags(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, incomingTagsKey{}, b) -} - -// OutgoingTags returns the tags from the context for the outbound RPC. -// -// This is intended for gRPC-internal use ONLY. -func OutgoingTags(ctx context.Context) []byte { - b, _ := ctx.Value(outgoingTagsKey{}).([]byte) - return b + traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-tags-bin") + if len(traceValues) == 0 { + return nil + } + return []byte(traceValues[len(traceValues)-1]) } -type incomingTraceKey struct{} -type outgoingTraceKey struct{} - // SetTrace attaches stats tagging data to the context, which will be sent in // the outgoing RPC with the header grpc-trace-bin. Subsequent calls to // SetTrace will overwrite the values from earlier calls. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: set the `grpc-trace-bin` header in the metadata instead. 
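With SetTags/Tags (above) and SetTrace/Trace (continued below) reduced to deprecated wrappers over metadata, the equivalent direct usage mirrors their new bodies. A sketch; withTags and tagsFrom are illustrative helper names, not gRPC API:

package tagsdemo

import (
	"context"

	"google.golang.org/grpc/metadata"
)

// Client side: attach the tag bytes the same way the deprecated
// stats.SetTags now does.
func withTags(ctx context.Context, b []byte) context.Context {
	return metadata.AppendToOutgoingContext(ctx, "grpc-tags-bin", string(b))
}

// Server side: read them back the way the deprecated stats.Tags now does,
// keeping the last value if the header was set more than once.
func tagsFrom(ctx context.Context) []byte {
	vals := metadata.ValueFromIncomingContext(ctx, "grpc-tags-bin")
	if len(vals) == 0 {
		return nil
	}
	return []byte(vals[len(vals)-1])
}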
func SetTrace(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, outgoingTraceKey{}, b) + return metadata.AppendToOutgoingContext(ctx, "grpc-trace-bin", string(b)) } // Trace returns the trace from the context for the inbound RPC. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: obtain the `grpc-trace-bin` header from metadata instead. func Trace(ctx context.Context) []byte { - b, _ := ctx.Value(incomingTraceKey{}).([]byte) - return b -} - -// SetIncomingTrace attaches stats tagging data to the context, to be read by -// the application (not sent in outgoing RPCs). It is intended for -// gRPC-internal use. -func SetIncomingTrace(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, incomingTraceKey{}, b) -} - -// OutgoingTrace returns the trace from the context for the outbound RPC. It is -// intended for gRPC-internal use. -func OutgoingTrace(ctx context.Context) []byte { - b, _ := ctx.Value(outgoingTraceKey{}).([]byte) - return b + traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-trace-bin") + if len(traceValues) == 0 { + return nil + } + return []byte(traceValues[len(traceValues)-1]) } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index bb2b2a216ce..12163150ba7 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -23,7 +23,7 @@ import ( "errors" "io" "math" - "math/rand" + rand "math/rand/v2" "strconv" "sync" "time" @@ -113,7 +113,9 @@ type ClientStream interface { // SendMsg is generally called by generated code. On error, SendMsg aborts // the stream. If the error was generated by the client, the status is // returned directly; otherwise, io.EOF is returned and the status of - // the stream may be discovered using RecvMsg. + // the stream may be discovered using RecvMsg. For unary or server-streaming + // RPCs (StreamDesc.ClientStreams is false), a nil error is returned + // unconditionally. // // SendMsg blocks until: // - There is sufficient flow control to schedule m with the transport, or @@ -216,7 +218,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth var mc serviceconfig.MethodConfig var onCommit func() - var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) { return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) 
} @@ -256,9 +258,9 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) { - c := defaultCallInfo() + callInfo := defaultCallInfo() if mc.WaitForReady != nil { - c.failFast = !*mc.WaitForReady + callInfo.failFast = !*mc.WaitForReady } // Possible context leak: @@ -279,20 +281,20 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client }() for _, o := range opts { - if err := o.before(c); err != nil { + if err := o.before(callInfo); err != nil { return nil, toRPCErr(err) } } - c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) - c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) - if err := setCallInfoCodec(c); err != nil { + callInfo.maxSendMessageSize = getMaxSize(mc.MaxReqSize, callInfo.maxSendMessageSize, defaultClientMaxSendMessageSize) + callInfo.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, callInfo.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + if err := setCallInfoCodec(callInfo); err != nil { return nil, err } callHdr := &transport.CallHdr{ Host: cc.authority, Method: method, - ContentSubtype: c.contentSubtype, + ContentSubtype: callInfo.contentSubtype, DoneFunc: doneFunc, } @@ -300,22 +302,22 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client // set. In that case, also find the compressor from the encoding package. // Otherwise, use the compressor configured by the WithCompressor DialOption, // if set. - var cp Compressor - var comp encoding.Compressor - if ct := c.compressorType; ct != "" { + var compressorV0 Compressor + var compressorV1 encoding.Compressor + if ct := callInfo.compressorName; ct != "" { callHdr.SendCompress = ct if ct != encoding.Identity { - comp = encoding.GetCompressor(ct) - if comp == nil { + compressorV1 = encoding.GetCompressor(ct) + if compressorV1 == nil { return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) } } - } else if cc.dopts.cp != nil { - callHdr.SendCompress = cc.dopts.cp.Type() - cp = cc.dopts.cp + } else if cc.dopts.compressorV0 != nil { + callHdr.SendCompress = cc.dopts.compressorV0.Type() + compressorV0 = cc.dopts.compressorV0 } - if c.creds != nil { - callHdr.Creds = c.creds + if callInfo.creds != nil { + callHdr.Creds = callInfo.creds } cs := &clientStream{ @@ -323,12 +325,12 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client ctx: ctx, methodConfig: &mc, opts: opts, - callInfo: c, + callInfo: callInfo, cc: cc, desc: desc, - codec: c.codec, - cp: cp, - comp: comp, + codec: callInfo.codec, + compressorV0: compressorV0, + compressorV1: compressorV1, cancel: cancel, firstAttempt: true, onCommit: onCommit, @@ -410,7 +412,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) return nil, ErrClientConnClosing } - ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.compressorV0, cs.compressorV1) method := cs.callHdr.Method var beginTime time.Time shs := cs.cc.dopts.copts.StatsHandlers @@ -452,12 +454,12 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) 
(*csAttempt, error) } return &csAttempt{ - ctx: ctx, - beginTime: beginTime, - cs: cs, - dc: cs.cc.dopts.dc, - statsHandlers: shs, - trInfo: trInfo, + ctx: ctx, + beginTime: beginTime, + cs: cs, + decompressorV0: cs.cc.dopts.dc, + statsHandlers: shs, + trInfo: trInfo, }, nil } @@ -465,7 +467,7 @@ func (a *csAttempt) getTransport() error { cs := a.cs var err error - a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) + a.transport, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { if de, ok := err.(dropError); ok { err = de.error @@ -474,7 +476,7 @@ func (a *csAttempt) getTransport() error { return err } if a.trInfo != nil { - a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr()) + a.trInfo.firstLine.SetRemoteAddr(a.transport.RemoteAddr()) } return nil } @@ -501,7 +503,7 @@ func (a *csAttempt) newStream() error { a.ctx = metadata.NewOutgoingContext(a.ctx, md) } - s, err := a.t.NewStream(a.ctx, cs.callHdr) + s, err := a.transport.NewStream(a.ctx, cs.callHdr) if err != nil { nse, ok := err.(*transport.NewStreamError) if !ok { @@ -516,9 +518,9 @@ func (a *csAttempt) newStream() error { // Unwrap and convert error. return toRPCErr(nse.Err) } - a.s = s + a.transportStream = s a.ctx = s.Context() - a.p = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} + a.parser = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} return nil } @@ -530,9 +532,9 @@ type clientStream struct { cc *ClientConn desc *StreamDesc - codec baseCodec - cp Compressor - comp encoding.Compressor + codec baseCodec + compressorV0 Compressor + compressorV1 encoding.Compressor cancel context.CancelFunc // cancels all attempts @@ -581,17 +583,17 @@ type replayOp struct { // csAttempt implements a single transport stream attempt within a // clientStream. type csAttempt struct { - ctx context.Context - cs *clientStream - t transport.ClientTransport - s *transport.Stream - p *parser - pickResult balancer.PickResult - - finished bool - dc Decompressor - decomp encoding.Compressor - decompSet bool + ctx context.Context + cs *clientStream + transport transport.ClientTransport + transportStream *transport.ClientStream + parser *parser + pickResult balancer.PickResult + + finished bool + decompressorV0 Decompressor + decompressorV1 encoding.Compressor + decompressorSet bool mu sync.Mutex // guards trInfo.tr // trInfo may be nil (if EnableTracing is false). @@ -637,14 +639,14 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { // RPC is finished or committed or was dropped by the picker; cannot retry. return false, err } - if a.s == nil && a.allowTransparentRetry { + if a.transportStream == nil && a.allowTransparentRetry { return true, nil } // Wait for the trailers. unprocessed := false - if a.s != nil { - <-a.s.Done() - unprocessed = a.s.Unprocessed() + if a.transportStream != nil { + <-a.transportStream.Done() + unprocessed = a.transportStream.Unprocessed() } if cs.firstAttempt && unprocessed { // First attempt, stream unprocessed: transparently retry. @@ -656,14 +658,14 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { pushback := 0 hasPushback := false - if a.s != nil { - if !a.s.TrailersOnly() { + if a.transportStream != nil { + if !a.transportStream.TrailersOnly() { return false, err } // TODO(retry): Move down if the spec changes to not check server pushback // before considering this a failure for throttling. 
- sps := a.s.Trailer()["grpc-retry-pushback-ms"] + sps := a.transportStream.Trailer()["grpc-retry-pushback-ms"] if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { @@ -680,8 +682,8 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { } var code codes.Code - if a.s != nil { - code = a.s.Status().Code() + if a.transportStream != nil { + code = a.transportStream.Status().Code() } else { code = status.Code(err) } @@ -706,11 +708,10 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { cs.numRetriesSincePushback = 0 } else { fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback)) - cur := float64(rp.InitialBackoff) * fact - if max := float64(rp.MaxBackoff); cur > max { - cur = max - } - dur = time.Duration(rand.Int63n(int64(cur))) + cur := min(float64(rp.InitialBackoff)*fact, float64(rp.MaxBackoff)) + // Apply jitter by multiplying with a random factor between 0.8 and 1.2 + cur *= 0.8 + 0.4*rand.Float64() + dur = time.Duration(int64(cur)) cs.numRetriesSincePushback++ } @@ -755,8 +756,8 @@ func (cs *clientStream) Context() context.Context { cs.commitAttempt() // No need to lock before using attempt, since we know it is committed and // cannot change. - if cs.attempt.s != nil { - return cs.attempt.s.Context() + if cs.attempt.transportStream != nil { + return cs.attempt.transportStream.Context() } return cs.ctx } @@ -793,9 +794,9 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) continue } if err == io.EOF { - <-a.s.Done() + <-a.transportStream.Done() } - if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) { + if err == nil || (err == io.EOF && a.transportStream.Status().Code() == codes.OK) { onSuccess() cs.mu.Unlock() return err @@ -811,7 +812,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { var m metadata.MD err := cs.withRetry(func(a *csAttempt) error { var err error - m, err = a.s.Header() + m, err = a.transportStream.Header() return toRPCErr(err) }, cs.commitAttemptLocked) @@ -855,10 +856,10 @@ func (cs *clientStream) Trailer() metadata.MD { // directions -- it will prevent races and should not meaningfully impact // performance. cs.commitAttempt() - if cs.attempt.s == nil { + if cs.attempt.transportStream == nil { return nil } - return cs.attempt.s.Trailer() + return cs.attempt.transportStream.Trailer() } func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { @@ -903,7 +904,7 @@ func (cs *clientStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.cp, cs.comp, cs.cc.dopts.copts.BufferPool) + hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.compressorV0, cs.compressorV1, cs.cc.dopts.copts.BufferPool) if err != nil { return err } @@ -991,7 +992,7 @@ func (cs *clientStream) CloseSend() error { } cs.sentLast = true op := func(a *csAttempt) error { - a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) + a.transportStream.Write(nil, nil, &transport.WriteOptions{Last: true}) // Always return nil; io.EOF is the only error that might make sense // instead, but there is no need to signal the client to call RecvMsg // as the only use left for the stream after CloseSend is to call @@ -1029,7 +1030,7 @@ func (cs *clientStream) finish(err error) { if cs.attempt != nil { cs.attempt.finish(err) // after functions all rely upon having a stream. 
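The retry backoff above now jitters multiplicatively, drawing uniformly from [0.8*cur, 1.2*cur) rather than the old uniform [0, cur) draw, so a retry can no longer fire near-immediately. A self-contained sketch of the same computation; the policy constants stand in for the retry policy's InitialBackoff, MaxBackoff, and BackoffMultiplier:

package main

import (
	"fmt"
	"math"
	rand "math/rand/v2"
	"time"
)

// backoff mirrors the jitter logic in the hunk above with illustrative
// constants in place of the parsed retry-policy fields.
func backoff(retries int) time.Duration {
	const (
		initial = 100 * time.Millisecond
		maxWait = 1 * time.Second
		mult    = 2.0
	)
	cur := min(float64(initial)*math.Pow(mult, float64(retries)), float64(maxWait))
	cur *= 0.8 + 0.4*rand.Float64() // uniform in [0.8*cur, 1.2*cur)
	return time.Duration(int64(cur))
}

func main() {
	for i := range 4 {
		fmt.Println(backoff(i))
	}
}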
- if cs.attempt.s != nil { + if cs.attempt.transportStream != nil { for _, o := range cs.opts { o.after(cs.callInfo, cs.attempt) } @@ -1083,7 +1084,7 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength } a.mu.Unlock() } - if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { + if err := a.transportStream.Write(hdr, payld, &transport.WriteOptions{Last: !cs.desc.ClientStreams}); err != nil { if !cs.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1097,9 +1098,6 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) } } - if channelz.IsOn() { - a.t.IncrMsgSent() - } return nil } @@ -1110,25 +1108,25 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { defer payInfo.free() } - if !a.decompSet { + if !a.decompressorSet { // Block until we receive headers containing received message encoding. - if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity { - if a.dc == nil || a.dc.Type() != ct { + if ct := a.transportStream.RecvCompress(); ct != "" && ct != encoding.Identity { + if a.decompressorV0 == nil || a.decompressorV0.Type() != ct { // No configured decompressor, or it does not match the incoming // message encoding; attempt to find a registered compressor that does. - a.dc = nil - a.decomp = encoding.GetCompressor(ct) + a.decompressorV0 = nil + a.decompressorV1 = encoding.GetCompressor(ct) } } else { // No compression is used; disable our decompressor. - a.dc = nil + a.decompressorV0 = nil } // Only initialize this state once per stream. - a.decompSet = true + a.decompressorSet = true } - if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp, false); err != nil { + if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil { if err == io.EOF { - if statusErr := a.s.Status().Err(); statusErr != nil { + if statusErr := a.transportStream.Status().Err(); statusErr != nil { return statusErr } return io.EOF // indicates successful end of stream. @@ -1153,17 +1151,14 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { Length: payInfo.uncompressedBytes.Len(), }) } - if channelz.IsOn() { - a.t.IncrMsgRecv() - } if cs.desc.ServerStreams { // Subsequent messages should be received by subsequent RecvMsg calls. return nil } // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. 
- if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp, false); err == io.EOF { - return a.s.Status().Err() // non-server streaming Recv returns nil on success + if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF { + return a.transportStream.Status().Err() // non-server streaming Recv returns nil on success } else if err != nil { return toRPCErr(err) } @@ -1182,20 +1177,20 @@ func (a *csAttempt) finish(err error) { err = nil } var tr metadata.MD - if a.s != nil { - a.t.CloseStream(a.s, err) - tr = a.s.Trailer() + if a.transportStream != nil { + a.transportStream.Close(err) + tr = a.transportStream.Trailer() } if a.pickResult.Done != nil { br := false - if a.s != nil { - br = a.s.BytesReceived() + if a.transportStream != nil { + br = a.transportStream.BytesReceived() } a.pickResult.Done(balancer.DoneInfo{ Err: err, Trailer: tr, - BytesSent: a.s != nil, + BytesSent: a.transportStream != nil, BytesReceived: br, ServerLoad: balancerload.Parse(tr), }) @@ -1277,7 +1272,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin // if set. var cp Compressor var comp encoding.Compressor - if ct := c.compressorType; ct != "" { + if ct := c.compressorName; ct != "" { callHdr.SendCompress = ct if ct != encoding.Identity { comp = encoding.GetCompressor(ct) @@ -1285,9 +1280,9 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) } } - } else if ac.cc.dopts.cp != nil { - callHdr.SendCompress = ac.cc.dopts.cp.Type() - cp = ac.cc.dopts.cp + } else if ac.cc.dopts.compressorV0 != nil { + callHdr.SendCompress = ac.cc.dopts.compressorV0.Type() + cp = ac.cc.dopts.compressorV0 } if c.creds != nil { callHdr.Creds = c.creds @@ -1295,26 +1290,26 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin // Use a special addrConnStream to avoid retry. 
as := &addrConnStream{ - callHdr: callHdr, - ac: ac, - ctx: ctx, - cancel: cancel, - opts: opts, - callInfo: c, - desc: desc, - codec: c.codec, - cp: cp, - comp: comp, - t: t, - } - - s, err := as.t.NewStream(as.ctx, as.callHdr) + callHdr: callHdr, + ac: ac, + ctx: ctx, + cancel: cancel, + opts: opts, + callInfo: c, + desc: desc, + codec: c.codec, + sendCompressorV0: cp, + sendCompressorV1: comp, + transport: t, + } + + s, err := as.transport.NewStream(as.ctx, as.callHdr) if err != nil { err = toRPCErr(err) return nil, err } - as.s = s - as.p = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool} + as.transportStream = s + as.parser = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { // Listen on stream context to cleanup when the stream context is @@ -1340,29 +1335,31 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin } type addrConnStream struct { - s *transport.Stream - ac *addrConn - callHdr *transport.CallHdr - cancel context.CancelFunc - opts []CallOption - callInfo *callInfo - t transport.ClientTransport - ctx context.Context - sentLast bool - desc *StreamDesc - codec baseCodec - cp Compressor - comp encoding.Compressor - decompSet bool - dc Decompressor - decomp encoding.Compressor - p *parser - mu sync.Mutex - finished bool + transportStream *transport.ClientStream + ac *addrConn + callHdr *transport.CallHdr + cancel context.CancelFunc + opts []CallOption + callInfo *callInfo + transport transport.ClientTransport + ctx context.Context + sentLast bool + desc *StreamDesc + codec baseCodec + sendCompressorV0 Compressor + sendCompressorV1 encoding.Compressor + decompressorSet bool + decompressorV0 Decompressor + decompressorV1 encoding.Compressor + parser *parser + + // mu guards finished and is held for the entire finish method. + mu sync.Mutex + finished bool } func (as *addrConnStream) Header() (metadata.MD, error) { - m, err := as.s.Header() + m, err := as.transportStream.Header() if err != nil { as.finish(toRPCErr(err)) } @@ -1370,7 +1367,7 @@ func (as *addrConnStream) Header() (metadata.MD, error) { } func (as *addrConnStream) Trailer() metadata.MD { - return as.s.Trailer() + return as.transportStream.Trailer() } func (as *addrConnStream) CloseSend() error { @@ -1380,7 +1377,7 @@ func (as *addrConnStream) CloseSend() error { } as.sentLast = true - as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) + as.transportStream.Write(nil, nil, &transport.WriteOptions{Last: true}) // Always return nil; io.EOF is the only error that might make sense // instead, but there is no need to signal the client to call RecvMsg // as the only use left for the stream after CloseSend is to call @@ -1389,7 +1386,7 @@ func (as *addrConnStream) CloseSend() error { } func (as *addrConnStream) Context() context.Context { - return as.s.Context() + return as.transportStream.Context() } func (as *addrConnStream) SendMsg(m any) (err error) { @@ -1411,7 +1408,7 @@ func (as *addrConnStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.cp, as.comp, as.ac.dopts.copts.BufferPool) + hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.sendCompressorV0, as.sendCompressorV1, as.ac.dopts.copts.BufferPool) if err != nil { return err } @@ -1430,7 +1427,7 @@ func (as *addrConnStream) SendMsg(m any) (err error) { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", payload.Len(), *as.callInfo.maxSendMessageSize) } - if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if err := as.transportStream.Write(hdr, payload, &transport.WriteOptions{Last: !as.desc.ClientStreams}); err != nil { if !as.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1440,9 +1437,6 @@ func (as *addrConnStream) SendMsg(m any) (err error) { return io.EOF } - if channelz.IsOn() { - as.t.IncrMsgSent() - } return nil } @@ -1454,25 +1448,25 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { } }() - if !as.decompSet { + if !as.decompressorSet { // Block until we receive headers containing received message encoding. - if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity { - if as.dc == nil || as.dc.Type() != ct { + if ct := as.transportStream.RecvCompress(); ct != "" && ct != encoding.Identity { + if as.decompressorV0 == nil || as.decompressorV0.Type() != ct { // No configured decompressor, or it does not match the incoming // message encoding; attempt to find a registered compressor that does. - as.dc = nil - as.decomp = encoding.GetCompressor(ct) + as.decompressorV0 = nil + as.decompressorV1 = encoding.GetCompressor(ct) } } else { // No compression is used; disable our decompressor. - as.dc = nil + as.decompressorV0 = nil } // Only initialize this state once per stream. - as.decompSet = true + as.decompressorSet = true } - if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err != nil { + if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil { if err == io.EOF { - if statusErr := as.s.Status().Err(); statusErr != nil { + if statusErr := as.transportStream.Status().Err(); statusErr != nil { return statusErr } return io.EOF // indicates successful end of stream. @@ -1480,9 +1474,6 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { return toRPCErr(err) } - if channelz.IsOn() { - as.t.IncrMsgRecv() - } if as.desc.ServerStreams { // Subsequent messages should be received by subsequent RecvMsg calls. return nil @@ -1490,8 +1481,8 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. - if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err == io.EOF { - return as.s.Status().Err() // non-server streaming Recv returns nil on success + if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF { + return as.transportStream.Status().Err() // non-server streaming Recv returns nil on success } else if err != nil { return toRPCErr(err) } @@ -1509,8 +1500,8 @@ func (as *addrConnStream) finish(err error) { // Ending a stream with EOF indicates a success. err = nil } - if as.s != nil { - as.t.CloseStream(as.s, err) + if as.transportStream != nil { + as.transportStream.Close(err) } if err != nil { @@ -1577,15 +1568,14 @@ type ServerStream interface { // serverStream implements a server side Stream. 
type serverStream struct { ctx context.Context - t transport.ServerTransport - s *transport.Stream + s *transport.ServerStream p *parser codec baseCodec - cp Compressor - dc Decompressor - comp encoding.Compressor - decomp encoding.Compressor + compressorV0 Compressor + compressorV1 encoding.Compressor + decompressorV0 Decompressor + decompressorV1 encoding.Compressor sendCompressorName string @@ -1628,7 +1618,7 @@ func (ss *serverStream) SendHeader(md metadata.MD) error { return status.Error(codes.Internal, err.Error()) } - err = ss.t.WriteHeader(ss.s, md) + err = ss.s.SendHeader(md) if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() sh := &binarylog.ServerHeader{ @@ -1668,7 +1658,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } if err != nil && err != io.EOF { st, _ := status.FromError(toRPCErr(err)) - ss.t.WriteStatus(ss.s, st) + ss.s.WriteStatus(st) // Non-user specified status was sent out. This should be an error // case (as a server side Cancel maybe). // @@ -1676,20 +1666,17 @@ func (ss *serverStream) SendMsg(m any) (err error) { // status from the service handler, we will log that error instead. // This behavior is similar to an interceptor. } - if channelz.IsOn() && err == nil { - ss.t.IncrMsgSent() - } }() // Server handler could have set new compressor by calling SetSendCompressor. // In case it is set, we need to use it for compressing outbound message. if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName { - ss.comp = encoding.GetCompressor(sendCompressorsName) + ss.compressorV1 = encoding.GetCompressor(sendCompressorsName) ss.sendCompressorName = sendCompressorsName } // load hdr, payload, data - hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.cp, ss.comp, ss.p.bufferPool) + hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.compressorV0, ss.compressorV1, ss.p.bufferPool) if err != nil { return err } @@ -1710,7 +1697,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { if payloadLen > ss.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, ss.maxSendMessageSize) } - if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { + if err := ss.s.Write(hdr, payload, &transport.WriteOptions{Last: false}); err != nil { return toRPCErr(err) } @@ -1756,7 +1743,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } if err != nil && err != io.EOF { st, _ := status.FromError(toRPCErr(err)) - ss.t.WriteStatus(ss.s, st) + ss.s.WriteStatus(st) // Non-user specified status was sent out. This should be an error // case (as a server side Cancel maybe). // @@ -1764,16 +1751,13 @@ func (ss *serverStream) RecvMsg(m any) (err error) { // status from the service handler, we will log that error instead. // This behavior is similar to an interceptor. 
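serverStream.SendMsg above re-reads the stream's compressor name before each write precisely because a handler may override it at runtime via SetSendCompressor (any time before headers have been sent). A sketch of doing so from a handler; echo is an assumed grpc.StreamHandler, not part of this patch:

package compressdemo

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding/gzip" // importing registers the "gzip" compressor
)

// echo switches this RPC's outbound compression; SendMsg picks the new
// compressor up before writing each subsequent message.
func echo(_ any, stream grpc.ServerStream) error {
	if err := grpc.SetSendCompressor(stream.Context(), gzip.Name); err != nil {
		return err
	}
	// ... RecvMsg/SendMsg as usual; subsequent sends are gzip-compressed.
	return nil
}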
} - if channelz.IsOn() && err == nil { - ss.t.IncrMsgRecv() - } }() var payInfo *payloadInfo if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} defer payInfo.free() } - if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp, true); err != nil { + if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil { if err == io.EOF { if len(ss.binlogs) != 0 { chc := &binarylog.ClientHalfClose{} @@ -1784,7 +1768,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { return err } if err == io.ErrUnexpectedEOF { - err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + err = status.Error(codes.Internal, io.ErrUnexpectedEOF.Error()) } return toRPCErr(err) } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index d50e843598c..3c148a814f2 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.68.0" +const Version = "1.71.1" diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index 5a57ef6f3c80a4a930b7bdb33b039ea94d1eb5f2..323829da1477e4496d664b2a1092a9f9cec275d4 100644 GIT binary patch literal 146 zcmX}mF%Ezr3X5(&e%rBRTLK{CjOa+)E@2mYkk=mEF7 B6)FG# literal 138 zcmd;*muO*EV!mX@pe4$|D8MAaq`<7fXux#Ijt$6VkYMDJmv|0Wz$CyZ!KlClRKN&Q wzyMY7f?Y`%s2WL*1th1%ddZFnY{E-+C6MVz3P75fB^b3pHY+@1*LcYe04AXnGXMYp diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index 10132c9b384..b08b71830c6 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -69,6 +69,9 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { parent.IsDelimitedEncoded = v == genid.FeatureSet_DELIMITED_enum_value case genid.FeatureSet_JsonFormat_field_number: parent.IsJSONCompliant = v == genid.FeatureSet_ALLOW_enum_value + case genid.FeatureSet_EnforceNamingStyle_field_number: + // EnforceNamingStyle is enforced in protoc, languages other than C++ + // are not supposed to do anything with this feature. 
default: panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num)) } diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index f30ab6b586f..39524782add 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -1014,6 +1014,7 @@ const ( FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation" FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding" FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format" + FeatureSet_EnforceNamingStyle_field_name protoreflect.Name = "enforce_naming_style" FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence" FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type" @@ -1021,6 +1022,7 @@ const ( FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation" FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding" FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format" + FeatureSet_EnforceNamingStyle_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enforce_naming_style" ) // Field numbers for google.protobuf.FeatureSet. @@ -1031,6 +1033,7 @@ const ( FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4 FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5 FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6 + FeatureSet_EnforceNamingStyle_field_number protoreflect.FieldNumber = 7 ) // Full and short names for google.protobuf.FeatureSet.FieldPresence. @@ -1112,6 +1115,19 @@ const ( FeatureSet_LEGACY_BEST_EFFORT_enum_value = 2 ) +// Full and short names for google.protobuf.FeatureSet.EnforceNamingStyle. +const ( + FeatureSet_EnforceNamingStyle_enum_fullname = "google.protobuf.FeatureSet.EnforceNamingStyle" + FeatureSet_EnforceNamingStyle_enum_name = "EnforceNamingStyle" +) + +// Enum values for google.protobuf.FeatureSet.EnforceNamingStyle. +const ( + FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN_enum_value = 0 + FeatureSet_STYLE2024_enum_value = 1 + FeatureSet_STYLE_LEGACY_enum_value = 2 +) + // Names for google.protobuf.FeatureSetDefaults. const ( FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults" diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go similarity index 99% rename from vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go rename to vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go index 1ffddf6877a..42dd6f70c6f 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build go1.21 - package strs import ( diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go deleted file mode 100644 index 832a7988f14..00000000000 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package strs - -import ( - "unsafe" - - "google.golang.org/protobuf/reflect/protoreflect" -) - -type ( - stringHeader struct { - Data unsafe.Pointer - Len int - } - sliceHeader struct { - Data unsafe.Pointer - Len int - Cap int - } -) - -// UnsafeString returns an unsafe string reference of b. -// The caller must treat the input slice as immutable. -// -// WARNING: Use carefully. The returned result must not leak to the end user -// unless the input slice is provably immutable. -func UnsafeString(b []byte) (s string) { - src := (*sliceHeader)(unsafe.Pointer(&b)) - dst := (*stringHeader)(unsafe.Pointer(&s)) - dst.Data = src.Data - dst.Len = src.Len - return s -} - -// UnsafeBytes returns an unsafe bytes slice reference of s. -// The caller must treat returned slice as immutable. -// -// WARNING: Use carefully. The returned result must not leak to the end user. -func UnsafeBytes(s string) (b []byte) { - src := (*stringHeader)(unsafe.Pointer(&s)) - dst := (*sliceHeader)(unsafe.Pointer(&b)) - dst.Data = src.Data - dst.Len = src.Len - dst.Cap = src.Len - return b -} - -// Builder builds a set of strings with shared lifetime. -// This differs from strings.Builder, which is for building a single string. -type Builder struct { - buf []byte -} - -// AppendFullName is equivalent to protoreflect.FullName.Append, -// but optimized for large batches where each name has a shared lifetime. -func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName { - n := len(prefix) + len(".") + len(name) - if len(prefix) == 0 { - n -= len(".") - } - sb.grow(n) - sb.buf = append(sb.buf, prefix...) - sb.buf = append(sb.buf, '.') - sb.buf = append(sb.buf, name...) - return protoreflect.FullName(sb.last(n)) -} - -// MakeString is equivalent to string(b), but optimized for large batches -// with a shared lifetime. -func (sb *Builder) MakeString(b []byte) string { - sb.grow(len(b)) - sb.buf = append(sb.buf, b...) - return sb.last(len(b)) -} - -func (sb *Builder) grow(n int) { - if cap(sb.buf)-len(sb.buf) >= n { - return - } - - // Unlike strings.Builder, we do not need to copy over the contents - // of the old buffer since our builder provides no API for - // retrieving previously created strings. 
- sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) -} - -func (sb *Builder) last(n int) string { - return UnsafeString(sb.buf[len(sb.buf)-n:]) -} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 01efc33030d..aac1cb18a74 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 36 - Patch = 5 + Patch = 6 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go index 3c6fe57807b..ef55b97dded 100644 --- a/vendor/google.golang.org/protobuf/proto/merge.go +++ b/vendor/google.golang.org/protobuf/proto/merge.go @@ -59,6 +59,12 @@ func Clone(m Message) Message { return dst.Interface() } +// CloneOf returns a deep copy of m. If the top-level message is invalid, +// it returns an invalid message as well. +func CloneOf[M Message](m M) M { + return Clone(m).(M) +} + // mergeOptions provides a namespace for merge functions, and can be // exported in the future if we add user-visible merge options. type mergeOptions struct{} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index ea154eec44d..a4a0a2971dd 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -398,6 +398,8 @@ func (p *SourcePath) appendFeatureSet(b []byte) []byte { b = p.appendSingularField(b, "message_encoding", nil) case 6: b = p.appendSingularField(b, "json_format", nil) + case 7: + b = p.appendSingularField(b, "enforce_naming_style", nil) } return b } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go similarity index 99% rename from vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go rename to vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go index 479527b58dd..fe17f37220e 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.21 - package protoreflect import ( diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go deleted file mode 100644 index 0015fcb35d8..00000000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
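The proto.CloneOf added above is a thin generic wrapper over Clone that preserves the concrete message type, sparing callers the type assertion. A small usage sketch; durationpb is chosen purely for illustration:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	orig := durationpb.New(3 * time.Second)
	copied := proto.CloneOf(orig) // already *durationpb.Duration; no assertion needed
	copied.Seconds = 5            // deep copy: orig is untouched
	fmt.Println(orig.GetSeconds(), copied.GetSeconds()) // prints: 3 5
}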
- -//go:build !go1.21 - -package protoreflect - -import ( - "unsafe" - - "google.golang.org/protobuf/internal/pragma" -) - -type ( - stringHeader struct { - Data unsafe.Pointer - Len int - } - sliceHeader struct { - Data unsafe.Pointer - Len int - Cap int - } - ifaceHeader struct { - Type unsafe.Pointer - Data unsafe.Pointer - } -) - -var ( - nilType = typeOf(nil) - boolType = typeOf(*new(bool)) - int32Type = typeOf(*new(int32)) - int64Type = typeOf(*new(int64)) - uint32Type = typeOf(*new(uint32)) - uint64Type = typeOf(*new(uint64)) - float32Type = typeOf(*new(float32)) - float64Type = typeOf(*new(float64)) - stringType = typeOf(*new(string)) - bytesType = typeOf(*new([]byte)) - enumType = typeOf(*new(EnumNumber)) -) - -// typeOf returns a pointer to the Go type information. -// The pointer is comparable and equal if and only if the types are identical. -func typeOf(t any) unsafe.Pointer { - return (*ifaceHeader)(unsafe.Pointer(&t)).Type -} - -// value is a union where only one type can be represented at a time. -// The struct is 24B large on 64-bit systems and requires the minimum storage -// necessary to represent each possible type. -// -// The Go GC needs to be able to scan variables containing pointers. -// As such, pointers and non-pointers cannot be intermixed. -type value struct { - pragma.DoNotCompare // 0B - - // typ stores the type of the value as a pointer to the Go type. - typ unsafe.Pointer // 8B - - // ptr stores the data pointer for a String, Bytes, or interface value. - ptr unsafe.Pointer // 8B - - // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or - // Enum value as a raw uint64. - // - // It is also used to store the length of a String or Bytes value; - // the capacity is ignored. - num uint64 // 8B -} - -func valueOfString(v string) Value { - p := (*stringHeader)(unsafe.Pointer(&v)) - return Value{typ: stringType, ptr: p.Data, num: uint64(len(v))} -} -func valueOfBytes(v []byte) Value { - p := (*sliceHeader)(unsafe.Pointer(&v)) - return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))} -} -func valueOfIface(v any) Value { - p := (*ifaceHeader)(unsafe.Pointer(&v)) - return Value{typ: p.Type, ptr: p.Data} -} - -func (v Value) getString() (x string) { - *(*stringHeader)(unsafe.Pointer(&x)) = stringHeader{Data: v.ptr, Len: int(v.num)} - return x -} -func (v Value) getBytes() (x []byte) { - *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)} - return x -} -func (v Value) getIface() (x any) { - *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} - return x -} diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index a5163376741..7fe280f194c 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -1139,6 +1139,65 @@ func (FeatureSet_JsonFormat) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5} } +type FeatureSet_EnforceNamingStyle int32 + +const ( + FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN FeatureSet_EnforceNamingStyle = 0 + FeatureSet_STYLE2024 FeatureSet_EnforceNamingStyle = 1 + FeatureSet_STYLE_LEGACY FeatureSet_EnforceNamingStyle = 2 +) + +// Enum value maps for FeatureSet_EnforceNamingStyle. 
+var ( + FeatureSet_EnforceNamingStyle_name = map[int32]string{ + 0: "ENFORCE_NAMING_STYLE_UNKNOWN", + 1: "STYLE2024", + 2: "STYLE_LEGACY", + } + FeatureSet_EnforceNamingStyle_value = map[string]int32{ + "ENFORCE_NAMING_STYLE_UNKNOWN": 0, + "STYLE2024": 1, + "STYLE_LEGACY": 2, + } +) + +func (x FeatureSet_EnforceNamingStyle) Enum() *FeatureSet_EnforceNamingStyle { + p := new(FeatureSet_EnforceNamingStyle) + *p = x + return p +} + +func (x FeatureSet_EnforceNamingStyle) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_EnforceNamingStyle) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() +} + +func (FeatureSet_EnforceNamingStyle) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[16] +} + +func (x FeatureSet_EnforceNamingStyle) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_EnforceNamingStyle) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_EnforceNamingStyle(num) + return nil +} + +// Deprecated: Use FeatureSet_EnforceNamingStyle.Descriptor instead. +func (FeatureSet_EnforceNamingStyle) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 6} +} + // Represents the identified object's effect on the element in the original // .proto file. type GeneratedCodeInfo_Annotation_Semantic int32 @@ -1177,11 +1236,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string { } func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[17].Descriptor() } func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[16] + return &file_google_protobuf_descriptor_proto_enumTypes[17] } func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { @@ -1277,8 +1336,14 @@ type FileDescriptorProto struct { // The supported values are "proto2", "proto3", and "editions". // // If `edition` is present, this value must be "editions". + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` // The edition of the proto file. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -2212,6 +2277,9 @@ type FileOptions struct { // determining the ruby package. RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. 
Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. // See the documentation for the "Options" section above. @@ -2482,6 +2550,9 @@ type MessageOptions struct { // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -2648,6 +2719,9 @@ type FieldOptions struct { Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. @@ -2799,6 +2873,9 @@ func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { type OneofOptions struct { state protoimpl.MessageState `protogen:"open.v1"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -2871,6 +2948,9 @@ type EnumOptions struct { // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. 
Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -2958,6 +3038,9 @@ type EnumValueOptions struct { // this is a formalization for deprecating enum values. Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` // Indicate that fields annotated with this enum value should not be printed // out when using debug formats, e.g. when the field contains sensitive @@ -3046,6 +3129,9 @@ func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { type ServiceOptions struct { state protoimpl.MessageState `protogen:"open.v1"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"` // Is this service deprecated? // Depending on the target platform, this can emit Deprecated annotations @@ -3124,6 +3210,9 @@ type MethodOptions struct { Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. 
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
@@ -3310,6 +3399,7 @@ type FeatureSet struct {
 	Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"`
 	MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"`
 	JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"`
+	EnforceNamingStyle *FeatureSet_EnforceNamingStyle `protobuf:"varint,7,opt,name=enforce_naming_style,json=enforceNamingStyle,enum=google.protobuf.FeatureSet_EnforceNamingStyle" json:"enforce_naming_style,omitempty"`
 	extensionFields protoimpl.ExtensionFields
 	unknownFields protoimpl.UnknownFields
 	sizeCache protoimpl.SizeCache
@@ -3387,6 +3477,13 @@ func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat {
 	return FeatureSet_JSON_FORMAT_UNKNOWN
 }
+func (x *FeatureSet) GetEnforceNamingStyle() FeatureSet_EnforceNamingStyle {
+	if x != nil && x.EnforceNamingStyle != nil {
+		return *x.EnforceNamingStyle
+	}
+	return FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN
+}
+
 // A compiled specification for the defaults of a set of features. These
 // messages are generated from FeatureSet extensions and can be used to seed
 // feature resolution. The resolution with this object becomes a simple search
@@ -4361,777 +4458,367 @@ func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotatio
 var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor
-var file_google_protobuf_descriptor_proto_rawDesc = string([]byte{
-	0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x22, 0x5b, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
-	0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65,
-	0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73,
-	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69,
-	0x6c, 0x65, 0x2a, 0x0c, 0x08, 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01,
-	0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
-	0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
-	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07,
-	0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70,
-	0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64,
-	0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x65,
-	0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63,
-	0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x03,
0x28, - 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, - 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, 0x70, 0x65, - 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, 0x77, 0x65, - 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, 0x0a, 0x0c, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x07, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, - 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, - 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 
0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, - 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, - 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, - 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, - 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, - 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, - 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 
0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, - 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, - 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, - 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, - 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, - 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, - 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, 0x01, 0x02, - 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x1a, 0x94, - 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, - 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, - 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a, - 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, - 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, - 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, - 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, - 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, - 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 
0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, - 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, - 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, - 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, - 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, - 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, - 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, - 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, - 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, - 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, - 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, - 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, - 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, - 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, - 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, - 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, - 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, - 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, - 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, - 0xe3, 0x02, 0x0a, 
0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, - 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, - 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, - 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, - 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 
0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, - 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, - 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, - 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, - 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, - 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, - 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, - 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, - 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, - 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, - 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, - 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, - 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 
0x5f, 0x73, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, - 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, - 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, - 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, - 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, - 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, - 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, - 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, - 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, - 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, - 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, - 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, - 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, - 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, - 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 
0x73, - 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, - 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, - 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, - 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, - 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, - 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, - 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, - 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, - 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, - 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, 0x68, 0x70, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, - 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, - 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, - 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, - 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 
0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, - 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, - 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, - 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, - 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, - 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, - 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, - 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, - 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, - 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, - 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, - 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 
0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, - 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, - 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, - 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, - 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a, - 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, - 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, - 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, - 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a, - 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 
0x66, 0x61, 0x75, 0x6c, 0x74, - 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 0x0e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a, - 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75, - 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72, - 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, - 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, - 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, - 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, - 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, - 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, - 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, - 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, - 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, - 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, - 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 
0x45, - 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, - 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, - 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, - 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, - 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, - 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, - 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, - 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, - 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, - 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, - 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, - 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, - 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, - 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, - 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, - 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, - 0x02, 0x18, 0x01, 
0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, - 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, - 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, - 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, 0x0a, 0x10, - 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, - 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, - 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, - 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x99, - 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, - 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, - 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, - 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, - 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, - 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, - 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, - 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 
0x4e, 0x54, 0x10, 0x02, 0x2a, - 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, - 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, - 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, - 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, - 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, - 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, - 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, - 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, 0x01, 0x98, - 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, - 0x49, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, - 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, - 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 
0x65, - 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, 0x65, 0x6e, - 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, - 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, - 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x09, 0x12, - 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, - 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, - 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x2d, 0x88, - 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, - 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, - 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, 0x72, 0x65, - 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, - 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, - 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0x84, 0x07, 0xa2, - 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, - 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, - 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, 0x01, 0x98, - 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, - 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, - 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, - 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 
0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, - 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, - 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, - 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c, - 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, 0x6a, 0x73, - 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, - 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, - 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, - 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, - 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, - 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, - 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, - 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, - 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, - 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, - 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, - 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, - 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, 0x08, 0x01, - 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, - 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, - 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, - 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, - 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, - 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, - 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, - 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, - 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 
0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, - 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, - 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, - 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, - 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65, - 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, - 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78, - 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d, - 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, 0x04, 0x08, - 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x22, 0xb5, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, - 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 
0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, - 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, - 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, - 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, - 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, - 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2a, 0x0c, 0x08, - 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, 0x22, 0xd0, 0x02, 0x0a, 0x11, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, - 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, - 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, - 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, - 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, - 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, - 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, - 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, - 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 
0xa7, - 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, - 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x47, 0x41, 0x43, - 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11, - 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8, - 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, - 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, - 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, - 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, - 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, - 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, - 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, - 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, - 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, - 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, - 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, - 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, -}) +const file_google_protobuf_descriptor_proto_rawDesc = "" + + "\n" + + " google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"[\n" + + "\x11FileDescriptorSet\x128\n" + + "\x04file\x18\x01 \x03(\v2$.google.protobuf.FileDescriptorProtoR\x04file*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\x98\x05\n" + + "\x13FileDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" + + "\apackage\x18\x02 \x01(\tR\apackage\x12\x1e\n" + + "\n" + + "dependency\x18\x03 \x03(\tR\n" + + "dependency\x12+\n" + + "\x11public_dependency\x18\n" + + " \x03(\x05R\x10publicDependency\x12'\n" + + "\x0fweak_dependency\x18\v \x03(\x05R\x0eweakDependency\x12C\n" + + "\fmessage_type\x18\x04 \x03(\v2 .google.protobuf.DescriptorProtoR\vmessageType\x12A\n" + + "\tenum_type\x18\x05 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12A\n" + + "\aservice\x18\x06 \x03(\v2'.google.protobuf.ServiceDescriptorProtoR\aservice\x12C\n" + + 
"\textension\x18\a \x03(\v2%.google.protobuf.FieldDescriptorProtoR\textension\x126\n" + + "\aoptions\x18\b \x01(\v2\x1c.google.protobuf.FileOptionsR\aoptions\x12I\n" + + "\x10source_code_info\x18\t \x01(\v2\x1f.google.protobuf.SourceCodeInfoR\x0esourceCodeInfo\x12\x16\n" + + "\x06syntax\x18\f \x01(\tR\x06syntax\x122\n" + + "\aedition\x18\x0e \x01(\x0e2\x18.google.protobuf.EditionR\aedition\"\xb9\x06\n" + + "\x0fDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12;\n" + + "\x05field\x18\x02 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\x05field\x12C\n" + + "\textension\x18\x06 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\textension\x12A\n" + + "\vnested_type\x18\x03 \x03(\v2 .google.protobuf.DescriptorProtoR\n" + + "nestedType\x12A\n" + + "\tenum_type\x18\x04 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12X\n" + + "\x0fextension_range\x18\x05 \x03(\v2/.google.protobuf.DescriptorProto.ExtensionRangeR\x0eextensionRange\x12D\n" + + "\n" + + "oneof_decl\x18\b \x03(\v2%.google.protobuf.OneofDescriptorProtoR\toneofDecl\x129\n" + + "\aoptions\x18\a \x01(\v2\x1f.google.protobuf.MessageOptionsR\aoptions\x12U\n" + + "\x0ereserved_range\x18\t \x03(\v2..google.protobuf.DescriptorProto.ReservedRangeR\rreservedRange\x12#\n" + + "\rreserved_name\x18\n" + + " \x03(\tR\freservedName\x1az\n" + + "\x0eExtensionRange\x12\x14\n" + + "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" + + "\x03end\x18\x02 \x01(\x05R\x03end\x12@\n" + + "\aoptions\x18\x03 \x01(\v2&.google.protobuf.ExtensionRangeOptionsR\aoptions\x1a7\n" + + "\rReservedRange\x12\x14\n" + + "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" + + "\x03end\x18\x02 \x01(\x05R\x03end\"\xcc\x04\n" + + "\x15ExtensionRangeOptions\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x12Y\n" + + "\vdeclaration\x18\x02 \x03(\v22.google.protobuf.ExtensionRangeOptions.DeclarationB\x03\x88\x01\x02R\vdeclaration\x127\n" + + "\bfeatures\x182 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12m\n" + + "\fverification\x18\x03 \x01(\x0e28.google.protobuf.ExtensionRangeOptions.VerificationState:\n" + + "UNVERIFIEDB\x03\x88\x01\x02R\fverification\x1a\x94\x01\n" + + "\vDeclaration\x12\x16\n" + + "\x06number\x18\x01 \x01(\x05R\x06number\x12\x1b\n" + + "\tfull_name\x18\x02 \x01(\tR\bfullName\x12\x12\n" + + "\x04type\x18\x03 \x01(\tR\x04type\x12\x1a\n" + + "\breserved\x18\x05 \x01(\bR\breserved\x12\x1a\n" + + "\brepeated\x18\x06 \x01(\bR\brepeatedJ\x04\b\x04\x10\x05\"4\n" + + "\x11VerificationState\x12\x0f\n" + + "\vDECLARATION\x10\x00\x12\x0e\n" + + "\n" + + "UNVERIFIED\x10\x01*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xc1\x06\n" + + "\x14FieldDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" + + "\x06number\x18\x03 \x01(\x05R\x06number\x12A\n" + + "\x05label\x18\x04 \x01(\x0e2+.google.protobuf.FieldDescriptorProto.LabelR\x05label\x12>\n" + + "\x04type\x18\x05 \x01(\x0e2*.google.protobuf.FieldDescriptorProto.TypeR\x04type\x12\x1b\n" + + "\ttype_name\x18\x06 \x01(\tR\btypeName\x12\x1a\n" + + "\bextendee\x18\x02 \x01(\tR\bextendee\x12#\n" + + "\rdefault_value\x18\a \x01(\tR\fdefaultValue\x12\x1f\n" + + "\voneof_index\x18\t \x01(\x05R\n" + + "oneofIndex\x12\x1b\n" + + "\tjson_name\x18\n" + + " \x01(\tR\bjsonName\x127\n" + + "\aoptions\x18\b \x01(\v2\x1d.google.protobuf.FieldOptionsR\aoptions\x12'\n" + + "\x0fproto3_optional\x18\x11 \x01(\bR\x0eproto3Optional\"\xb6\x02\n" + + "\x04Type\x12\x0f\n" + + "\vTYPE_DOUBLE\x10\x01\x12\x0e\n" + + "\n" + + 
"TYPE_FLOAT\x10\x02\x12\x0e\n" + + "\n" + + "TYPE_INT64\x10\x03\x12\x0f\n" + + "\vTYPE_UINT64\x10\x04\x12\x0e\n" + + "\n" + + "TYPE_INT32\x10\x05\x12\x10\n" + + "\fTYPE_FIXED64\x10\x06\x12\x10\n" + + "\fTYPE_FIXED32\x10\a\x12\r\n" + + "\tTYPE_BOOL\x10\b\x12\x0f\n" + + "\vTYPE_STRING\x10\t\x12\x0e\n" + + "\n" + + "TYPE_GROUP\x10\n" + + "\x12\x10\n" + + "\fTYPE_MESSAGE\x10\v\x12\x0e\n" + + "\n" + + "TYPE_BYTES\x10\f\x12\x0f\n" + + "\vTYPE_UINT32\x10\r\x12\r\n" + + "\tTYPE_ENUM\x10\x0e\x12\x11\n" + + "\rTYPE_SFIXED32\x10\x0f\x12\x11\n" + + "\rTYPE_SFIXED64\x10\x10\x12\x0f\n" + + "\vTYPE_SINT32\x10\x11\x12\x0f\n" + + "\vTYPE_SINT64\x10\x12\"C\n" + + "\x05Label\x12\x12\n" + + "\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n" + + "\x0eLABEL_REPEATED\x10\x03\x12\x12\n" + + "\x0eLABEL_REQUIRED\x10\x02\"c\n" + + "\x14OneofDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x127\n" + + "\aoptions\x18\x02 \x01(\v2\x1d.google.protobuf.OneofOptionsR\aoptions\"\xe3\x02\n" + + "\x13EnumDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12?\n" + + "\x05value\x18\x02 \x03(\v2).google.protobuf.EnumValueDescriptorProtoR\x05value\x126\n" + + "\aoptions\x18\x03 \x01(\v2\x1c.google.protobuf.EnumOptionsR\aoptions\x12]\n" + + "\x0ereserved_range\x18\x04 \x03(\v26.google.protobuf.EnumDescriptorProto.EnumReservedRangeR\rreservedRange\x12#\n" + + "\rreserved_name\x18\x05 \x03(\tR\freservedName\x1a;\n" + + "\x11EnumReservedRange\x12\x14\n" + + "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" + + "\x03end\x18\x02 \x01(\x05R\x03end\"\x83\x01\n" + + "\x18EnumValueDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" + + "\x06number\x18\x02 \x01(\x05R\x06number\x12;\n" + + "\aoptions\x18\x03 \x01(\v2!.google.protobuf.EnumValueOptionsR\aoptions\"\xa7\x01\n" + + "\x16ServiceDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12>\n" + + "\x06method\x18\x02 \x03(\v2&.google.protobuf.MethodDescriptorProtoR\x06method\x129\n" + + "\aoptions\x18\x03 \x01(\v2\x1f.google.protobuf.ServiceOptionsR\aoptions\"\x89\x02\n" + + "\x15MethodDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x1d\n" + + "\n" + + "input_type\x18\x02 \x01(\tR\tinputType\x12\x1f\n" + + "\voutput_type\x18\x03 \x01(\tR\n" + + "outputType\x128\n" + + "\aoptions\x18\x04 \x01(\v2\x1e.google.protobuf.MethodOptionsR\aoptions\x120\n" + + "\x10client_streaming\x18\x05 \x01(\b:\x05falseR\x0fclientStreaming\x120\n" + + "\x10server_streaming\x18\x06 \x01(\b:\x05falseR\x0fserverStreaming\"\xad\t\n" + + "\vFileOptions\x12!\n" + + "\fjava_package\x18\x01 \x01(\tR\vjavaPackage\x120\n" + + "\x14java_outer_classname\x18\b \x01(\tR\x12javaOuterClassname\x125\n" + + "\x13java_multiple_files\x18\n" + + " \x01(\b:\x05falseR\x11javaMultipleFiles\x12D\n" + + "\x1djava_generate_equals_and_hash\x18\x14 \x01(\bB\x02\x18\x01R\x19javaGenerateEqualsAndHash\x12:\n" + + "\x16java_string_check_utf8\x18\x1b \x01(\b:\x05falseR\x13javaStringCheckUtf8\x12S\n" + + "\foptimize_for\x18\t \x01(\x0e2).google.protobuf.FileOptions.OptimizeMode:\x05SPEEDR\voptimizeFor\x12\x1d\n" + + "\n" + + "go_package\x18\v \x01(\tR\tgoPackage\x125\n" + + "\x13cc_generic_services\x18\x10 \x01(\b:\x05falseR\x11ccGenericServices\x129\n" + + "\x15java_generic_services\x18\x11 \x01(\b:\x05falseR\x13javaGenericServices\x125\n" + + "\x13py_generic_services\x18\x12 \x01(\b:\x05falseR\x11pyGenericServices\x12%\n" + + "\n" + + "deprecated\x18\x17 \x01(\b:\x05falseR\n" + + "deprecated\x12.\n" + + "\x10cc_enable_arenas\x18\x1f 
\x01(\b:\x04trueR\x0eccEnableArenas\x12*\n" + + "\x11objc_class_prefix\x18$ \x01(\tR\x0fobjcClassPrefix\x12)\n" + + "\x10csharp_namespace\x18% \x01(\tR\x0fcsharpNamespace\x12!\n" + + "\fswift_prefix\x18' \x01(\tR\vswiftPrefix\x12(\n" + + "\x10php_class_prefix\x18( \x01(\tR\x0ephpClassPrefix\x12#\n" + + "\rphp_namespace\x18) \x01(\tR\fphpNamespace\x124\n" + + "\x16php_metadata_namespace\x18, \x01(\tR\x14phpMetadataNamespace\x12!\n" + + "\fruby_package\x18- \x01(\tR\vrubyPackage\x127\n" + + "\bfeatures\x182 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\":\n" + + "\fOptimizeMode\x12\t\n" + + "\x05SPEED\x10\x01\x12\r\n" + + "\tCODE_SIZE\x10\x02\x12\x10\n" + + "\fLITE_RUNTIME\x10\x03*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b*\x10+J\x04\b&\x10'R\x14php_generic_services\"\xf4\x03\n" + + "\x0eMessageOptions\x12<\n" + + "\x17message_set_wire_format\x18\x01 \x01(\b:\x05falseR\x14messageSetWireFormat\x12L\n" + + "\x1fno_standard_descriptor_accessor\x18\x02 \x01(\b:\x05falseR\x1cnoStandardDescriptorAccessor\x12%\n" + + "\n" + + "deprecated\x18\x03 \x01(\b:\x05falseR\n" + + "deprecated\x12\x1b\n" + + "\tmap_entry\x18\a \x01(\bR\bmapEntry\x12V\n" + + "&deprecated_legacy_json_field_conflicts\x18\v \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" + + "\bfeatures\x18\f \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x05\x10\x06J\x04\b\x06\x10\aJ\x04\b\b\x10\tJ\x04\b\t\x10\n" + + "\"\x9d\r\n" + + "\fFieldOptions\x12A\n" + + "\x05ctype\x18\x01 \x01(\x0e2#.google.protobuf.FieldOptions.CType:\x06STRINGR\x05ctype\x12\x16\n" + + "\x06packed\x18\x02 \x01(\bR\x06packed\x12G\n" + + "\x06jstype\x18\x06 \x01(\x0e2$.google.protobuf.FieldOptions.JSType:\tJS_NORMALR\x06jstype\x12\x19\n" + + "\x04lazy\x18\x05 \x01(\b:\x05falseR\x04lazy\x12.\n" + + "\x0funverified_lazy\x18\x0f \x01(\b:\x05falseR\x0eunverifiedLazy\x12%\n" + + "\n" + + "deprecated\x18\x03 \x01(\b:\x05falseR\n" + + "deprecated\x12\x19\n" + + "\x04weak\x18\n" + + " \x01(\b:\x05falseR\x04weak\x12(\n" + + "\fdebug_redact\x18\x10 \x01(\b:\x05falseR\vdebugRedact\x12K\n" + + "\tretention\x18\x11 \x01(\x0e2-.google.protobuf.FieldOptions.OptionRetentionR\tretention\x12H\n" + + "\atargets\x18\x13 \x03(\x0e2..google.protobuf.FieldOptions.OptionTargetTypeR\atargets\x12W\n" + + "\x10edition_defaults\x18\x14 \x03(\v2,.google.protobuf.FieldOptions.EditionDefaultR\x0feditionDefaults\x127\n" + + "\bfeatures\x18\x15 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12U\n" + + "\x0ffeature_support\x18\x16 \x01(\v2,.google.protobuf.FieldOptions.FeatureSupportR\x0efeatureSupport\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x1aZ\n" + + "\x0eEditionDefault\x122\n" + + "\aedition\x18\x03 \x01(\x0e2\x18.google.protobuf.EditionR\aedition\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value\x1a\x96\x02\n" + + "\x0eFeatureSupport\x12G\n" + + "\x12edition_introduced\x18\x01 \x01(\x0e2\x18.google.protobuf.EditionR\x11editionIntroduced\x12G\n" + + "\x12edition_deprecated\x18\x02 \x01(\x0e2\x18.google.protobuf.EditionR\x11editionDeprecated\x12/\n" + + "\x13deprecation_warning\x18\x03 \x01(\tR\x12deprecationWarning\x12A\n" + + "\x0fedition_removed\x18\x04 
\x01(\x0e2\x18.google.protobuf.EditionR\x0eeditionRemoved\"/\n" + + "\x05CType\x12\n" + + "\n" + + "\x06STRING\x10\x00\x12\b\n" + + "\x04CORD\x10\x01\x12\x10\n" + + "\fSTRING_PIECE\x10\x02\"5\n" + + "\x06JSType\x12\r\n" + + "\tJS_NORMAL\x10\x00\x12\r\n" + + "\tJS_STRING\x10\x01\x12\r\n" + + "\tJS_NUMBER\x10\x02\"U\n" + + "\x0fOptionRetention\x12\x15\n" + + "\x11RETENTION_UNKNOWN\x10\x00\x12\x15\n" + + "\x11RETENTION_RUNTIME\x10\x01\x12\x14\n" + + "\x10RETENTION_SOURCE\x10\x02\"\x8c\x02\n" + + "\x10OptionTargetType\x12\x17\n" + + "\x13TARGET_TYPE_UNKNOWN\x10\x00\x12\x14\n" + + "\x10TARGET_TYPE_FILE\x10\x01\x12\x1f\n" + + "\x1bTARGET_TYPE_EXTENSION_RANGE\x10\x02\x12\x17\n" + + "\x13TARGET_TYPE_MESSAGE\x10\x03\x12\x15\n" + + "\x11TARGET_TYPE_FIELD\x10\x04\x12\x15\n" + + "\x11TARGET_TYPE_ONEOF\x10\x05\x12\x14\n" + + "\x10TARGET_TYPE_ENUM\x10\x06\x12\x1a\n" + + "\x16TARGET_TYPE_ENUM_ENTRY\x10\a\x12\x17\n" + + "\x13TARGET_TYPE_SERVICE\x10\b\x12\x16\n" + + "\x12TARGET_TYPE_METHOD\x10\t*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x12\x10\x13\"\xac\x01\n" + + "\fOneofOptions\x127\n" + + "\bfeatures\x18\x01 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xd1\x02\n" + + "\vEnumOptions\x12\x1f\n" + + "\vallow_alias\x18\x02 \x01(\bR\n" + + "allowAlias\x12%\n" + + "\n" + + "deprecated\x18\x03 \x01(\b:\x05falseR\n" + + "deprecated\x12V\n" + + "&deprecated_legacy_json_field_conflicts\x18\x06 \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" + + "\bfeatures\x18\a \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x05\x10\x06\"\xd8\x02\n" + + "\x10EnumValueOptions\x12%\n" + + "\n" + + "deprecated\x18\x01 \x01(\b:\x05falseR\n" + + "deprecated\x127\n" + + "\bfeatures\x18\x02 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12(\n" + + "\fdebug_redact\x18\x03 \x01(\b:\x05falseR\vdebugRedact\x12U\n" + + "\x0ffeature_support\x18\x04 \x01(\v2,.google.protobuf.FieldOptions.FeatureSupportR\x0efeatureSupport\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xd5\x01\n" + + "\x0eServiceOptions\x127\n" + + "\bfeatures\x18\" \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12%\n" + + "\n" + + "deprecated\x18! \x01(\b:\x05falseR\n" + + "deprecated\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x99\x03\n" + + "\rMethodOptions\x12%\n" + + "\n" + + "deprecated\x18! 
\x01(\b:\x05falseR\n" + + "deprecated\x12q\n" + + "\x11idempotency_level\x18\" \x01(\x0e2/.google.protobuf.MethodOptions.IdempotencyLevel:\x13IDEMPOTENCY_UNKNOWNR\x10idempotencyLevel\x127\n" + + "\bfeatures\x18# \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\"P\n" + + "\x10IdempotencyLevel\x12\x17\n" + + "\x13IDEMPOTENCY_UNKNOWN\x10\x00\x12\x13\n" + + "\x0fNO_SIDE_EFFECTS\x10\x01\x12\x0e\n" + + "\n" + + "IDEMPOTENT\x10\x02*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x9a\x03\n" + + "\x13UninterpretedOption\x12A\n" + + "\x04name\x18\x02 \x03(\v2-.google.protobuf.UninterpretedOption.NamePartR\x04name\x12)\n" + + "\x10identifier_value\x18\x03 \x01(\tR\x0fidentifierValue\x12,\n" + + "\x12positive_int_value\x18\x04 \x01(\x04R\x10positiveIntValue\x12,\n" + + "\x12negative_int_value\x18\x05 \x01(\x03R\x10negativeIntValue\x12!\n" + + "\fdouble_value\x18\x06 \x01(\x01R\vdoubleValue\x12!\n" + + "\fstring_value\x18\a \x01(\fR\vstringValue\x12'\n" + + "\x0faggregate_value\x18\b \x01(\tR\x0eaggregateValue\x1aJ\n" + + "\bNamePart\x12\x1b\n" + + "\tname_part\x18\x01 \x02(\tR\bnamePart\x12!\n" + + "\fis_extension\x18\x02 \x02(\bR\visExtension\"\xae\f\n" + + "\n" + + "FeatureSet\x12\x91\x01\n" + + "\x0efield_presence\x18\x01 \x01(\x0e2).google.protobuf.FeatureSet.FieldPresenceB?\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPLICIT\x18\x84\a\xa2\x01\r\x12\bIMPLICIT\x18\xe7\a\xa2\x01\r\x12\bEXPLICIT\x18\xe8\a\xb2\x01\x03\b\xe8\aR\rfieldPresence\x12l\n" + + "\tenum_type\x18\x02 \x01(\x0e2$.google.protobuf.FeatureSet.EnumTypeB)\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\v\x12\x06CLOSED\x18\x84\a\xa2\x01\t\x12\x04OPEN\x18\xe7\a\xb2\x01\x03\b\xe8\aR\benumType\x12\x98\x01\n" + + "\x17repeated_field_encoding\x18\x03 \x01(\x0e21.google.protobuf.FeatureSet.RepeatedFieldEncodingB-\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPANDED\x18\x84\a\xa2\x01\v\x12\x06PACKED\x18\xe7\a\xb2\x01\x03\b\xe8\aR\x15repeatedFieldEncoding\x12~\n" + + "\x0futf8_validation\x18\x04 \x01(\x0e2*.google.protobuf.FeatureSet.Utf8ValidationB)\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\t\x12\x04NONE\x18\x84\a\xa2\x01\v\x12\x06VERIFY\x18\xe7\a\xb2\x01\x03\b\xe8\aR\x0eutf8Validation\x12~\n" + + "\x10message_encoding\x18\x05 \x01(\x0e2+.google.protobuf.FeatureSet.MessageEncodingB&\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\x14\x12\x0fLENGTH_PREFIXED\x18\x84\a\xb2\x01\x03\b\xe8\aR\x0fmessageEncoding\x12\x82\x01\n" + + "\vjson_format\x18\x06 \x01(\x0e2&.google.protobuf.FeatureSet.JsonFormatB9\x88\x01\x01\x98\x01\x03\x98\x01\x06\x98\x01\x01\xa2\x01\x17\x12\x12LEGACY_BEST_EFFORT\x18\x84\a\xa2\x01\n" + + "\x12\x05ALLOW\x18\xe7\a\xb2\x01\x03\b\xe8\aR\n" + + "jsonFormat\x12\xab\x01\n" + + "\x14enforce_naming_style\x18\a \x01(\x0e2..google.protobuf.FeatureSet.EnforceNamingStyleBI\x88\x01\x02\x98\x01\x01\x98\x01\x02\x98\x01\x03\x98\x01\x04\x98\x01\x05\x98\x01\x06\x98\x01\a\x98\x01\b\x98\x01\t\xa2\x01\x11\x12\fSTYLE_LEGACY\x18\x84\a\xa2\x01\x0e\x12\tSTYLE2024\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x12enforceNamingStyle\"\\\n" + + "\rFieldPresence\x12\x1a\n" + + "\x16FIELD_PRESENCE_UNKNOWN\x10\x00\x12\f\n" + + "\bEXPLICIT\x10\x01\x12\f\n" + + "\bIMPLICIT\x10\x02\x12\x13\n" + + "\x0fLEGACY_REQUIRED\x10\x03\"7\n" + + "\bEnumType\x12\x15\n" + + "\x11ENUM_TYPE_UNKNOWN\x10\x00\x12\b\n" + + "\x04OPEN\x10\x01\x12\n" + + "\n" + + "\x06CLOSED\x10\x02\"V\n" + + "\x15RepeatedFieldEncoding\x12#\n" + + 
"\x1fREPEATED_FIELD_ENCODING_UNKNOWN\x10\x00\x12\n" + + "\n" + + "\x06PACKED\x10\x01\x12\f\n" + + "\bEXPANDED\x10\x02\"I\n" + + "\x0eUtf8Validation\x12\x1b\n" + + "\x17UTF8_VALIDATION_UNKNOWN\x10\x00\x12\n" + + "\n" + + "\x06VERIFY\x10\x02\x12\b\n" + + "\x04NONE\x10\x03\"\x04\b\x01\x10\x01\"S\n" + + "\x0fMessageEncoding\x12\x1c\n" + + "\x18MESSAGE_ENCODING_UNKNOWN\x10\x00\x12\x13\n" + + "\x0fLENGTH_PREFIXED\x10\x01\x12\r\n" + + "\tDELIMITED\x10\x02\"H\n" + + "\n" + + "JsonFormat\x12\x17\n" + + "\x13JSON_FORMAT_UNKNOWN\x10\x00\x12\t\n" + + "\x05ALLOW\x10\x01\x12\x16\n" + + "\x12LEGACY_BEST_EFFORT\x10\x02\"W\n" + + "\x12EnforceNamingStyle\x12 \n" + + "\x1cENFORCE_NAMING_STYLE_UNKNOWN\x10\x00\x12\r\n" + + "\tSTYLE2024\x10\x01\x12\x10\n" + + "\fSTYLE_LEGACY\x10\x02*\x06\b\xe8\a\x10\x8bN*\x06\b\x8bN\x10\x90N*\x06\b\x90N\x10\x91NJ\x06\b\xe7\a\x10\xe8\a\"\xef\x03\n" + + "\x12FeatureSetDefaults\x12X\n" + + "\bdefaults\x18\x01 \x03(\v2<.google.protobuf.FeatureSetDefaults.FeatureSetEditionDefaultR\bdefaults\x12A\n" + + "\x0fminimum_edition\x18\x04 \x01(\x0e2\x18.google.protobuf.EditionR\x0eminimumEdition\x12A\n" + + "\x0fmaximum_edition\x18\x05 \x01(\x0e2\x18.google.protobuf.EditionR\x0emaximumEdition\x1a\xf8\x01\n" + + "\x18FeatureSetEditionDefault\x122\n" + + "\aedition\x18\x03 \x01(\x0e2\x18.google.protobuf.EditionR\aedition\x12N\n" + + "\x14overridable_features\x18\x04 \x01(\v2\x1b.google.protobuf.FeatureSetR\x13overridableFeatures\x12B\n" + + "\x0efixed_features\x18\x05 \x01(\v2\x1b.google.protobuf.FeatureSetR\rfixedFeaturesJ\x04\b\x01\x10\x02J\x04\b\x02\x10\x03R\bfeatures\"\xb5\x02\n" + + "\x0eSourceCodeInfo\x12D\n" + + "\blocation\x18\x01 \x03(\v2(.google.protobuf.SourceCodeInfo.LocationR\blocation\x1a\xce\x01\n" + + "\bLocation\x12\x16\n" + + "\x04path\x18\x01 \x03(\x05B\x02\x10\x01R\x04path\x12\x16\n" + + "\x04span\x18\x02 \x03(\x05B\x02\x10\x01R\x04span\x12)\n" + + "\x10leading_comments\x18\x03 \x01(\tR\x0fleadingComments\x12+\n" + + "\x11trailing_comments\x18\x04 \x01(\tR\x10trailingComments\x12:\n" + + "\x19leading_detached_comments\x18\x06 \x03(\tR\x17leadingDetachedComments*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xd0\x02\n" + + "\x11GeneratedCodeInfo\x12M\n" + + "\n" + + "annotation\x18\x01 \x03(\v2-.google.protobuf.GeneratedCodeInfo.AnnotationR\n" + + "annotation\x1a\xeb\x01\n" + + "\n" + + "Annotation\x12\x16\n" + + "\x04path\x18\x01 \x03(\x05B\x02\x10\x01R\x04path\x12\x1f\n" + + "\vsource_file\x18\x02 \x01(\tR\n" + + "sourceFile\x12\x14\n" + + "\x05begin\x18\x03 \x01(\x05R\x05begin\x12\x10\n" + + "\x03end\x18\x04 \x01(\x05R\x03end\x12R\n" + + "\bsemantic\x18\x05 \x01(\x0e26.google.protobuf.GeneratedCodeInfo.Annotation.SemanticR\bsemantic\"(\n" + + "\bSemantic\x12\b\n" + + "\x04NONE\x10\x00\x12\a\n" + + "\x03SET\x10\x01\x12\t\n" + + "\x05ALIAS\x10\x02*\xa7\x02\n" + + "\aEdition\x12\x13\n" + + "\x0fEDITION_UNKNOWN\x10\x00\x12\x13\n" + + "\x0eEDITION_LEGACY\x10\x84\a\x12\x13\n" + + "\x0eEDITION_PROTO2\x10\xe6\a\x12\x13\n" + + "\x0eEDITION_PROTO3\x10\xe7\a\x12\x11\n" + + "\fEDITION_2023\x10\xe8\a\x12\x11\n" + + "\fEDITION_2024\x10\xe9\a\x12\x17\n" + + "\x13EDITION_1_TEST_ONLY\x10\x01\x12\x17\n" + + "\x13EDITION_2_TEST_ONLY\x10\x02\x12\x1d\n" + + "\x17EDITION_99997_TEST_ONLY\x10\x9d\x8d\x06\x12\x1d\n" + + "\x17EDITION_99998_TEST_ONLY\x10\x9e\x8d\x06\x12\x1d\n" + + "\x17EDITION_99999_TEST_ONLY\x10\x9f\x8d\x06\x12\x13\n" + + "\vEDITION_MAX\x10\xff\xff\xff\xff\aB~\n" + + 
"\x13com.google.protobufB\x10DescriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection" var ( file_google_protobuf_descriptor_proto_rawDescOnce sync.Once @@ -5145,7 +4832,7 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 18) var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33) var file_google_protobuf_descriptor_proto_goTypes = []any{ (Edition)(0), // 0: google.protobuf.Edition @@ -5164,124 +4851,126 @@ var file_google_protobuf_descriptor_proto_goTypes = []any{ (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat - (GeneratedCodeInfo_Annotation_Semantic)(0), // 16: google.protobuf.GeneratedCodeInfo.Annotation.Semantic - (*FileDescriptorSet)(nil), // 17: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 18: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 19: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 20: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 21: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 22: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 23: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 24: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 25: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 26: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 27: google.protobuf.FileOptions - (*MessageOptions)(nil), // 28: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 29: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 30: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 31: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 32: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 33: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 34: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 35: google.protobuf.UninterpretedOption - (*FeatureSet)(nil), // 36: google.protobuf.FeatureSet - (*FeatureSetDefaults)(nil), // 37: google.protobuf.FeatureSetDefaults - (*SourceCodeInfo)(nil), // 38: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 39: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 40: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 41: google.protobuf.DescriptorProto.ReservedRange - (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration - (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault - (*FieldOptions_FeatureSupport)(nil), // 45: google.protobuf.FieldOptions.FeatureSupport - (*UninterpretedOption_NamePart)(nil), // 46: google.protobuf.UninterpretedOption.NamePart - (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 47: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - 
(*SourceCodeInfo_Location)(nil), // 48: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 49: google.protobuf.GeneratedCodeInfo.Annotation + (FeatureSet_EnforceNamingStyle)(0), // 16: google.protobuf.FeatureSet.EnforceNamingStyle + (GeneratedCodeInfo_Annotation_Semantic)(0), // 17: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 18: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 19: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 20: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 21: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 22: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 23: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 24: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 25: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 26: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 27: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 28: google.protobuf.FileOptions + (*MessageOptions)(nil), // 29: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 30: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 31: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 32: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 33: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 34: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 35: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 36: google.protobuf.UninterpretedOption + (*FeatureSet)(nil), // 37: google.protobuf.FeatureSet + (*FeatureSetDefaults)(nil), // 38: google.protobuf.FeatureSetDefaults + (*SourceCodeInfo)(nil), // 39: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 40: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 41: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 42: google.protobuf.DescriptorProto.ReservedRange + (*ExtensionRangeOptions_Declaration)(nil), // 43: google.protobuf.ExtensionRangeOptions.Declaration + (*EnumDescriptorProto_EnumReservedRange)(nil), // 44: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*FieldOptions_EditionDefault)(nil), // 45: google.protobuf.FieldOptions.EditionDefault + (*FieldOptions_FeatureSupport)(nil), // 46: google.protobuf.FieldOptions.FeatureSupport + (*UninterpretedOption_NamePart)(nil), // 47: google.protobuf.UninterpretedOption.NamePart + (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 48: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + (*SourceCodeInfo_Location)(nil), // 49: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 50: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 25, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 27, // 5: 
google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 19, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 20, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 24, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 26, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 22, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 28, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 39, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo 0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition - 21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 21, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 19, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration - 36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet + 22, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 22, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 20, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 24, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 41, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 23, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 29, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 42, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 36, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 43, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration + 37, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState 3, // 
20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 43, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 30, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 31, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 25, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 32, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 44, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 33, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 27, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 34, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 35, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions 4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 37, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet + 36, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 37, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet + 36, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption 5, // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType - 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> 
google.protobuf.FieldOptions.EditionDefault - 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet - 45, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport - 35, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet - 45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport - 35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 45, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault + 37, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet + 46, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 36, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 37, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet + 36, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 37, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet + 36, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 37, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet + 46, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 36, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 37, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 36, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption 9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 37, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 36, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 47, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart 10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence 11, // 58: 
google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType 12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding 13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation 14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding 15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat - 47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - 0, // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition - 0, // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition - 48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 0, // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition - 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition - 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition - 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition - 0, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition - 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet - 36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet - 16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 77, // [77:77] is the sub-list for method output_type - 77, // [77:77] is the sub-list for method input_type - 77, // [77:77] is the sub-list for extension type_name - 77, // [77:77] is the sub-list for extension extendee - 0, // [0:77] is the sub-list for field type_name + 16, // 63: google.protobuf.FeatureSet.enforce_naming_style:type_name -> google.protobuf.FeatureSet.EnforceNamingStyle + 48, // 64: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 65: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 66: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 49, // 67: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 50, // 68: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 21, // 69: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 70: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition + 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition + 0, // 73: 
google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition + 0, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 37, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet + 37, // 76: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet + 17, // 77: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 78, // [78:78] is the sub-list for method output_type + 78, // [78:78] is the sub-list for method input_type + 78, // [78:78] is the sub-list for extension type_name + 78, // [78:78] is the sub-list for extension extendee + 0, // [0:78] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -5294,7 +4983,7 @@ func file_google_protobuf_descriptor_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)), - NumEnums: 17, + NumEnums: 18, NumMessages: 33, NumExtensions: 0, NumServices: 0, diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index 28d24bad79e..37e712b6b72 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -228,63 +228,29 @@ var ( var File_google_protobuf_go_features_proto protoreflect.FileDescriptor -var file_google_protobuf_go_features_proto_rawDesc = string([]byte{ - 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xab, 0x05, 0x0a, 0x0a, 0x47, 0x6f, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, - 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, - 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01, - 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, - 0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, - 0xe7, 0x07, 0xb2, 0x01, 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65, - 0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, - 0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, - 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, - 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, - 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 
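Note on the index renumbering in the descriptor.proto hunks above: protoc-gen-go lays out its combined type table with all enums first and all messages after them, so adding the new FeatureSet.EnforceNamingStyle enum (NumEnums: 17 -> 18) shifts every message index in the dependency table up by one — which is all these +/- pairs amount to. A minimal sketch of that arithmetic; the function and constant names are illustrative, only the numbers come from the hunks:

package main

import "fmt"

// messageIndex reflects the protoc-gen-go convention that message types sit
// after all enum types in the generated type table, so a message's index in
// the dependency list is NumEnums plus its ordinal among the messages.
func messageIndex(numEnums, msgOrdinal int) int {
	return numEnums + msgOrdinal
}

func main() {
	const featureSetOrdinal = 19 // FeatureSet's position among the 33 messages, inferred from the hunks
	fmt.Println(messageIndex(17, featureSetOrdinal)) // 36: index before the new enum existed
	fmt.Println(messageIndex(18, featureSetOrdinal)) // 37: index after it, matching the + lines above
}

The trailing sub-list bounds move for the same reason: the one new reference (entry 63, enforce_naming_style) grows the field type_name sub-list from [0:77] to [0:78].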
0x75, 0x6d, 0x12, 0x74, 0x0a, 0x09, 0x61, 0x70, 0x69, - 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x70, - 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x41, 0x50, 0x49, - 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x3e, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x01, - 0xa2, 0x01, 0x1a, 0x12, 0x15, 0x41, 0x50, 0x49, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, - 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0f, - 0x12, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, 0x18, 0xe9, 0x07, 0xb2, - 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, 0x61, 0x70, 0x69, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, - 0x7c, 0x0a, 0x11, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x70, 0x72, - 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x70, 0x62, 0x2e, - 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x70, - 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x42, 0x30, 0x88, 0x01, 0x01, 0x98, - 0x01, 0x06, 0x98, 0x01, 0x07, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x1b, 0x12, 0x16, 0x53, 0x54, 0x52, - 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b, - 0x45, 0x45, 0x50, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe9, 0x07, 0x52, 0x0f, 0x73, 0x74, - 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x53, 0x0a, - 0x08, 0x41, 0x50, 0x49, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x50, 0x49, - 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x45, 0x4e, - 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x48, 0x59, 0x42, 0x52, 0x49, 0x44, - 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, - 0x10, 0x03, 0x22, 0x92, 0x01, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, - 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x52, - 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b, - 0x45, 0x45, 0x50, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45, - 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, - 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x10, 0x02, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54, - 0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, - 0x53, 0x54, 0x52, 0x49, 0x50, 0x10, 0x03, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, 0x1b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 
0x6f, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, -}) +const file_google_protobuf_go_features_proto_rawDesc = "" + + "\n" + + "!google/protobuf/go_features.proto\x12\x02pb\x1a google/protobuf/descriptor.proto\"\xab\x05\n" + + "\n" + + "GoFeatures\x12\xbe\x01\n" + + "\x1alegacy_unmarshal_json_enum\x18\x01 \x01(\bB\x80\x01\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\t\x12\x04true\x18\x84\a\xa2\x01\n" + + "\x12\x05false\x18\xe7\a\xb2\x01[\b\xe8\a\x10\xe8\a\x1aSThe legacy UnmarshalJSON API is deprecated and will be removed in a future edition.R\x17legacyUnmarshalJsonEnum\x12t\n" + + "\tapi_level\x18\x02 \x01(\x0e2\x17.pb.GoFeatures.APILevelB>\x88\x01\x01\x98\x01\x03\x98\x01\x01\xa2\x01\x1a\x12\x15API_LEVEL_UNSPECIFIED\x18\x84\a\xa2\x01\x0f\x12\n" + + "API_OPAQUE\x18\xe9\a\xb2\x01\x03\b\xe8\aR\bapiLevel\x12|\n" + + "\x11strip_enum_prefix\x18\x03 \x01(\x0e2\x1e.pb.GoFeatures.StripEnumPrefixB0\x88\x01\x01\x98\x01\x06\x98\x01\a\x98\x01\x01\xa2\x01\x1b\x12\x16STRIP_ENUM_PREFIX_KEEP\x18\x84\a\xb2\x01\x03\b\xe9\aR\x0fstripEnumPrefix\"S\n" + + "\bAPILevel\x12\x19\n" + + "\x15API_LEVEL_UNSPECIFIED\x10\x00\x12\f\n" + + "\bAPI_OPEN\x10\x01\x12\x0e\n" + + "\n" + + "API_HYBRID\x10\x02\x12\x0e\n" + + "\n" + + "API_OPAQUE\x10\x03\"\x92\x01\n" + + "\x0fStripEnumPrefix\x12!\n" + + "\x1dSTRIP_ENUM_PREFIX_UNSPECIFIED\x10\x00\x12\x1a\n" + + "\x16STRIP_ENUM_PREFIX_KEEP\x10\x01\x12#\n" + + "\x1fSTRIP_ENUM_PREFIX_GENERATE_BOTH\x10\x02\x12\x1b\n" + + "\x17STRIP_ENUM_PREFIX_STRIP\x10\x03:<\n" + + "\x02go\x12\x1b.google.protobuf.FeatureSet\x18\xea\a \x01(\v2\x0e.pb.GoFeaturesR\x02goB/Z-google.golang.org/protobuf/types/gofeaturespb" var ( file_google_protobuf_go_features_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 497da66e91f..1ff0d1494d4 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -412,23 +412,13 @@ func (x *Any) GetValue() []byte { var File_google_protobuf_any_proto protoreflect.FileDescriptor -var file_google_protobuf_any_proto_rawDesc = string([]byte{ - 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03, - 0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, - 0x61, 0x6e, 0x79, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, - 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 
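The go_features.pb.go hunk above — and the anypb, durationpb, emptypb, fieldmaskpb, structpb, timestamppb, and wrapperspb hunks that follow — are all the same mechanical protobuf-go 1.36.6 change: the raw file descriptor moves from a []byte variable to a string constant, which the init code then views as bytes via unsafe.Slice(unsafe.StringData(...), len(...)), as visible in the descriptor.proto DescBuilder hunk further up. A minimal sketch of that zero-copy view, assuming only the standard library (Go 1.20+):

package main

import (
	"fmt"
	"unsafe"
)

// rawDescBytes mirrors the expression the generated code uses: it reinterprets
// the backing array of a string as a []byte without copying. The result must
// be treated as read-only; writing through it would break string immutability.
func rawDescBytes(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	// Leading bytes of the anypb descriptor, as in the hunk below: 0x0a is
	// the field-1 length-delimited tag, 0x19 (25) the file-name length.
	const rawDesc = "\n\x19google/protobuf/any.proto"
	b := rawDescBytes(rawDesc)
	fmt.Println(len(b), b[0], b[1]) // 27 10 25
}

A plausible motivation (not stated in this patch) is that a string constant can live once in the binary's read-only data, whereas the old form built a mutable byte slice at package initialization.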
0x73, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -}) +const file_google_protobuf_any_proto_rawDesc = "" + + "\n" + + "\x19google/protobuf/any.proto\x12\x0fgoogle.protobuf\"6\n" + + "\x03Any\x12\x19\n" + + "\btype_url\x18\x01 \x01(\tR\atypeUrl\x12\x14\n" + + "\x05value\x18\x02 \x01(\fR\x05valueBv\n" + + "\x13com.google.protobufB\bAnyProtoP\x01Z,google.golang.org/protobuf/types/known/anypb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_any_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index 193880d1813..ca2e7b38f49 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -289,24 +289,13 @@ func (x *Duration) GetNanos() int32 { var File_google_protobuf_duration_proto protoreflect.FileDescriptor -var file_google_protobuf_duration_proto_rawDesc = string([]byte{ - 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, - 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, - 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01, - 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, - 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, - 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +const file_google_protobuf_duration_proto_rawDesc = "" + + "\n" + + "\x1egoogle/protobuf/duration.proto\x12\x0fgoogle.protobuf\":\n" + + "\bDuration\x12\x18\n" + + "\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" + + "\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x83\x01\n" + + "\x13com.google.protobufB\rDurationProtoP\x01Z1google.golang.org/protobuf/types/known/durationpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_duration_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go index a5b8657c4ba..1d7ee3b4766 100644 --- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -86,20 +86,12 @@ func (*Empty) Descriptor() ([]byte, []int) { var 
File_google_protobuf_empty_proto protoreflect.FileDescriptor -var file_google_protobuf_empty_proto_rawDesc = string([]byte{ - 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07, - 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x7d, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0a, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, - 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, - 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, - 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +const file_google_protobuf_empty_proto_rawDesc = "" + + "\n" + + "\x1bgoogle/protobuf/empty.proto\x12\x0fgoogle.protobuf\"\a\n" + + "\x05EmptyB}\n" + + "\x13com.google.protobufB\n" + + "EmptyProtoP\x01Z.google.golang.org/protobuf/types/known/emptypb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_empty_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index 041feb0f3ec..91ee89a5cd9 100644 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -504,23 +504,12 @@ func (x *FieldMask) GetPaths() []string { var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor -var file_google_protobuf_field_mask_proto_rawDesc = string([]byte{ - 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x22, 0x21, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, - 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, - 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x6d, 0x61, - 0x73, 0x6b, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x57, 0x65, 0x6c, 0x6c, 
0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +const file_google_protobuf_field_mask_proto_rawDesc = "" + + "\n" + + " google/protobuf/field_mask.proto\x12\x0fgoogle.protobuf\"!\n" + + "\tFieldMask\x12\x14\n" + + "\x05paths\x18\x01 \x03(\tR\x05pathsB\x85\x01\n" + + "\x13com.google.protobufB\x0eFieldMaskProtoP\x01Z2google.golang.org/protobuf/types/known/fieldmaskpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_field_mask_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go index ecdd31ab538..30411b72838 100644 --- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -672,55 +672,31 @@ func (x *ListValue) GetValues() []*Value { var File_google_protobuf_struct_proto protoreflect.FileDescriptor -var file_google_protobuf_struct_proto_rawDesc = string([]byte{ - 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, - 0x98, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x66, 0x69, - 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, - 0x75, 0x63, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x51, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, - 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, - 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, - 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x0c, - 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 
0x20, 0x01, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x73, - 0x74, 0x72, 0x75, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6c, 0x69, - 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, - 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, - 0x3b, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2e, 0x0a, 0x06, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x2a, 0x1b, 0x0a, 0x09, - 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x4e, 0x55, 0x4c, - 0x4c, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x00, 0x42, 0x7f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x42, 0x0b, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62, - 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, - 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -}) +const file_google_protobuf_struct_proto_rawDesc = "" + + "\n" + + "\x1cgoogle/protobuf/struct.proto\x12\x0fgoogle.protobuf\"\x98\x01\n" + + "\x06Struct\x12;\n" + + "\x06fields\x18\x01 \x03(\v2#.google.protobuf.Struct.FieldsEntryR\x06fields\x1aQ\n" + + "\vFieldsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12,\n" + + "\x05value\x18\x02 \x01(\v2\x16.google.protobuf.ValueR\x05value:\x028\x01\"\xb2\x02\n" + + "\x05Value\x12;\n" + + "\n" + + "null_value\x18\x01 \x01(\x0e2\x1a.google.protobuf.NullValueH\x00R\tnullValue\x12#\n" + + "\fnumber_value\x18\x02 \x01(\x01H\x00R\vnumberValue\x12#\n" + + "\fstring_value\x18\x03 \x01(\tH\x00R\vstringValue\x12\x1f\n" + + "\n" + + "bool_value\x18\x04 \x01(\bH\x00R\tboolValue\x12<\n" + + "\fstruct_value\x18\x05 \x01(\v2\x17.google.protobuf.StructH\x00R\vstructValue\x12;\n" + + "\n" + + "list_value\x18\x06 \x01(\v2\x1a.google.protobuf.ListValueH\x00R\tlistValueB\x06\n" + + "\x04kind\";\n" + + "\tListValue\x12.\n" + + "\x06values\x18\x01 \x03(\v2\x16.google.protobuf.ValueR\x06values*\x1b\n" + + "\tNullValue\x12\x0e\n" + + "\n" + + "NULL_VALUE\x10\x00B\x7f\n" + + "\x13com.google.protobufB\vStructProtoP\x01Z/google.golang.org/protobuf/types/known/structpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_struct_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go 
b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 00ac835c0bb..06d584c14be 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -298,24 +298,13 @@ func (x *Timestamp) GetNanos() int32 { var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor -var file_google_protobuf_timestamp_proto_rawDesc = string([]byte{ - 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, - 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, - 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, - 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, - 0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01, - 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, - 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +const file_google_protobuf_timestamp_proto_rawDesc = "" + + "\n" + + "\x1fgoogle/protobuf/timestamp.proto\x12\x0fgoogle.protobuf\";\n" + + "\tTimestamp\x12\x18\n" + + "\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" + + "\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x85\x01\n" + + "\x13com.google.protobufB\x0eTimestampProtoP\x01Z2google.golang.org/protobuf/types/known/timestamppb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_timestamp_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go index 5de5301063b..b7c2d0607d7 100644 --- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -28,10 +28,17 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // -// Wrappers for primitive (non-message) types. These types are useful -// for embedding primitives in the `google.protobuf.Any` type and for places -// where we need to distinguish between the absence of a primitive -// typed field and its default value. +// Wrappers for primitive (non-message) types. These types were needed +// for legacy reasons and are not recommended for use in new APIs. 
+// +// Historically these wrappers were useful to have presence on proto3 primitive +// fields, but proto3 syntax has been updated to support the `optional` keyword. +// Using that keyword is now the strongly preferred way to add presence to +// proto3 primitive fields. +// +// A secondary usecase was to embed primitives in the `google.protobuf.Any` +// type: it is now recommended that you embed your value in your own wrapper +// message which can be specifically documented. // // These wrappers have no meaningful use within repeated fields as they lack // the ability to detect presence on individual elements. @@ -54,6 +61,9 @@ import ( // Wrapper message for `double`. // // The JSON representation for `DoubleValue` is JSON number. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type DoubleValue struct { state protoimpl.MessageState `protogen:"open.v1"` // The double value. @@ -107,6 +117,9 @@ func (x *DoubleValue) GetValue() float64 { // Wrapper message for `float`. // // The JSON representation for `FloatValue` is JSON number. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type FloatValue struct { state protoimpl.MessageState `protogen:"open.v1"` // The float value. @@ -160,6 +173,9 @@ func (x *FloatValue) GetValue() float32 { // Wrapper message for `int64`. // // The JSON representation for `Int64Value` is JSON string. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type Int64Value struct { state protoimpl.MessageState `protogen:"open.v1"` // The int64 value. @@ -213,6 +229,9 @@ func (x *Int64Value) GetValue() int64 { // Wrapper message for `uint64`. // // The JSON representation for `UInt64Value` is JSON string. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type UInt64Value struct { state protoimpl.MessageState `protogen:"open.v1"` // The uint64 value. @@ -266,6 +285,9 @@ func (x *UInt64Value) GetValue() uint64 { // Wrapper message for `int32`. // // The JSON representation for `Int32Value` is JSON number. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type Int32Value struct { state protoimpl.MessageState `protogen:"open.v1"` // The int32 value. @@ -319,6 +341,9 @@ func (x *Int32Value) GetValue() int32 { // Wrapper message for `uint32`. // // The JSON representation for `UInt32Value` is JSON number. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type UInt32Value struct { state protoimpl.MessageState `protogen:"open.v1"` // The uint32 value. @@ -372,6 +397,9 @@ func (x *UInt32Value) GetValue() uint32 { // Wrapper message for `bool`. // // The JSON representation for `BoolValue` is JSON `true` and `false`. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type BoolValue struct { state protoimpl.MessageState `protogen:"open.v1"` // The bool value. @@ -425,6 +453,9 @@ func (x *BoolValue) GetValue() bool { // Wrapper message for `string`. // // The JSON representation for `StringValue` is JSON string. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type StringValue struct { state protoimpl.MessageState `protogen:"open.v1"` // The string value. 
@@ -478,6 +509,9 @@ func (x *StringValue) GetValue() string { // Wrapper message for `bytes`. // // The JSON representation for `BytesValue` is JSON string. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type BytesValue struct { state protoimpl.MessageState `protogen:"open.v1"` // The bytes value. @@ -530,41 +564,32 @@ func (x *BytesValue) GetValue() []byte { var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor -var file_google_protobuf_wrappers_proto_rawDesc = string([]byte{ - 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x22, 0x23, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x02, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, - 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, - 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x33, - 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x21, 0x0a, 0x09, - 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, - 0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x42, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x83, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x42, 0x0d, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, - 0x72, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +const file_google_protobuf_wrappers_proto_rawDesc = "" + + "\n" + + "\x1egoogle/protobuf/wrappers.proto\x12\x0fgoogle.protobuf\"#\n" + + "\vDoubleValue\x12\x14\n" + + "\x05value\x18\x01 \x01(\x01R\x05value\"\"\n" + + "\n" + + "FloatValue\x12\x14\n" + + "\x05value\x18\x01 \x01(\x02R\x05value\"\"\n" + + "\n" + + "Int64Value\x12\x14\n" + + "\x05value\x18\x01 \x01(\x03R\x05value\"#\n" + + "\vUInt64Value\x12\x14\n" + + "\x05value\x18\x01 \x01(\x04R\x05value\"\"\n" + + "\n" + + "Int32Value\x12\x14\n" + + "\x05value\x18\x01 \x01(\x05R\x05value\"#\n" + + "\vUInt32Value\x12\x14\n" + + "\x05value\x18\x01 \x01(\rR\x05value\"!\n" + + "\tBoolValue\x12\x14\n" + + "\x05value\x18\x01 \x01(\bR\x05value\"#\n" + + "\vStringValue\x12\x14\n" + + "\x05value\x18\x01 \x01(\tR\x05value\"\"\n" + + "\n" + + "BytesValue\x12\x14\n" + + "\x05value\x18\x01 \x01(\fR\x05valueB\x83\x01\n" + + "\x13com.google.protobufB\rWrappersProtoP\x01Z1google.golang.org/protobuf/types/known/wrapperspb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_wrappers_proto_rawDescOnce sync.Once diff --git a/vendor/modules.txt b/vendor/modules.txt index af56d1b2459..9a8fb4fd790 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -44,9 +44,6 @@ github.com/Microsoft/go-winio/pkg/guid ## explicit; go 1.18 github.com/Nerzal/gocloak/v13 github.com/Nerzal/gocloak/v13/pkg/jwx -# github.com/OneOfOne/xxhash v1.2.8 -## explicit; go 1.11 -github.com/OneOfOne/xxhash # github.com/ProtonMail/go-crypto v1.1.3 ## explicit; go 1.17 github.com/ProtonMail/go-crypto/bitcurves @@ -76,7 +73,7 @@ github.com/ProtonMail/go-crypto/openpgp/x448 github.com/RoaringBitmap/roaring github.com/RoaringBitmap/roaring/internal github.com/RoaringBitmap/roaring/roaring64 -# github.com/agnivade/levenshtein v1.2.0 +# github.com/agnivade/levenshtein v1.2.1 ## explicit; go 1.21 github.com/agnivade/levenshtein # github.com/ajg/form v1.5.1 @@ -336,8 +333,8 @@ github.com/coreos/go-systemd/v22/journal # github.com/cornelk/hashmap v1.0.8 ## explicit; go 1.19 github.com/cornelk/hashmap -# github.com/cpuguy83/go-md2man/v2 v2.0.5 -## explicit; go 1.11 +# github.com/cpuguy83/go-md2man/v2 v2.0.6 +## explicit; go 1.12 github.com/cpuguy83/go-md2man/v2/md2man # github.com/crewjam/httperr v0.2.0 ## explicit; go 1.13 @@ -391,6 +388,8 @@ github.com/desertbit/timer github.com/dgraph-io/ristretto github.com/dgraph-io/ristretto/z github.com/dgraph-io/ristretto/z/simd +# github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da +## explicit # github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f ## explicit github.com/dgryski/go-rendezvous @@ -438,9 +437,10 @@ github.com/fatih/color github.com/felixge/httpsnoop # github.com/frankban/quicktest v1.14.6 ## explicit; go 1.13 -# github.com/fsnotify/fsnotify v1.7.0 +# github.com/fsnotify/fsnotify v1.8.0 ## explicit; go 1.17 github.com/fsnotify/fsnotify +github.com/fsnotify/fsnotify/internal # github.com/gabriel-vasile/mimetype v1.4.8 ## explicit; go 1.20 github.com/gabriel-vasile/mimetype @@ -716,8 +716,6 @@ github.com/golang/snappy # github.com/gomodule/redigo v1.9.2 ## explicit; go 1.17 github.com/gomodule/redigo/redis -# github.com/google/flatbuffers v2.0.8+incompatible -## explicit # github.com/google/go-cmp v0.7.0 ## explicit; go 1.21 github.com/google/go-cmp/cmp @@ -793,8 +791,8 @@ 
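The wrapperspb doc-comment hunks a little further up spell out the deprecation story: the wrappers' one remaining job was field presence, and proto3's optional keyword now covers that directly. A hedged illustration of the presence distinction those comments describe, using the real wrapperspb API:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// A plain proto3 int32 field cannot distinguish "unset" from 0.
	// A wrapper field is a message, so nil means absent and a non-nil
	// pointer carries an explicit value, even the zero value:
	var unset *wrapperspb.Int32Value // field absent
	zero := wrapperspb.Int32(0)      // field present, explicitly 0
	fmt.Println(unset == nil, zero.GetValue()) // true 0

	// In new APIs the added comments recommend `optional int32 limit = 1;`
	// in the .proto instead, which yields the same presence semantics
	// without wrapping every scalar in a message.
}

As the added comments stress, none of the wrapper types is going away; the guidance only steers new APIs toward optional scalars.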
github.com/gorilla/schema ## explicit; go 1.14 github.com/grpc-ecosystem/go-grpc-middleware github.com/grpc-ecosystem/go-grpc-middleware/recovery -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 -## explicit; go 1.22.7 +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 +## explicit; go 1.22 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options github.com/grpc-ecosystem/grpc-gateway/v2/runtime @@ -1125,18 +1123,12 @@ github.com/onsi/gomega/matchers/support/goraph/edge github.com/onsi/gomega/matchers/support/goraph/node github.com/onsi/gomega/matchers/support/goraph/util github.com/onsi/gomega/types -# github.com/open-policy-agent/opa v0.70.0 -## explicit; go 1.21 +# github.com/open-policy-agent/opa v1.4.2 +## explicit; go 1.23.8 github.com/open-policy-agent/opa/ast -github.com/open-policy-agent/opa/ast/internal/scanner -github.com/open-policy-agent/opa/ast/internal/tokens github.com/open-policy-agent/opa/ast/json -github.com/open-policy-agent/opa/ast/location github.com/open-policy-agent/opa/bundle github.com/open-policy-agent/opa/capabilities -github.com/open-policy-agent/opa/config -github.com/open-policy-agent/opa/format -github.com/open-policy-agent/opa/hooks github.com/open-policy-agent/opa/internal/bundle github.com/open-policy-agent/opa/internal/cidr/merge github.com/open-policy-agent/opa/internal/compiler @@ -1188,33 +1180,48 @@ github.com/open-policy-agent/opa/internal/wasm/opcode github.com/open-policy-agent/opa/internal/wasm/sdk/opa/capabilities github.com/open-policy-agent/opa/internal/wasm/types github.com/open-policy-agent/opa/internal/wasm/util -github.com/open-policy-agent/opa/ir -github.com/open-policy-agent/opa/keys github.com/open-policy-agent/opa/loader -github.com/open-policy-agent/opa/loader/extension -github.com/open-policy-agent/opa/loader/filter -github.com/open-policy-agent/opa/logging -github.com/open-policy-agent/opa/metrics -github.com/open-policy-agent/opa/plugins -github.com/open-policy-agent/opa/plugins/rest github.com/open-policy-agent/opa/rego -github.com/open-policy-agent/opa/resolver -github.com/open-policy-agent/opa/resolver/wasm -github.com/open-policy-agent/opa/schemas github.com/open-policy-agent/opa/storage -github.com/open-policy-agent/opa/storage/inmem -github.com/open-policy-agent/opa/storage/internal/errors -github.com/open-policy-agent/opa/storage/internal/ptr -github.com/open-policy-agent/opa/topdown -github.com/open-policy-agent/opa/topdown/builtins -github.com/open-policy-agent/opa/topdown/cache -github.com/open-policy-agent/opa/topdown/copypropagation github.com/open-policy-agent/opa/topdown/print -github.com/open-policy-agent/opa/tracing github.com/open-policy-agent/opa/types -github.com/open-policy-agent/opa/util -github.com/open-policy-agent/opa/util/decoding -github.com/open-policy-agent/opa/version +github.com/open-policy-agent/opa/v1/ast +github.com/open-policy-agent/opa/v1/ast/internal/scanner +github.com/open-policy-agent/opa/v1/ast/internal/tokens +github.com/open-policy-agent/opa/v1/ast/json +github.com/open-policy-agent/opa/v1/ast/location +github.com/open-policy-agent/opa/v1/bundle +github.com/open-policy-agent/opa/v1/capabilities +github.com/open-policy-agent/opa/v1/config +github.com/open-policy-agent/opa/v1/format +github.com/open-policy-agent/opa/v1/hooks +github.com/open-policy-agent/opa/v1/ir +github.com/open-policy-agent/opa/v1/keys +github.com/open-policy-agent/opa/v1/loader +github.com/open-policy-agent/opa/v1/loader/extension 
+github.com/open-policy-agent/opa/v1/loader/filter +github.com/open-policy-agent/opa/v1/logging +github.com/open-policy-agent/opa/v1/metrics +github.com/open-policy-agent/opa/v1/plugins +github.com/open-policy-agent/opa/v1/plugins/rest +github.com/open-policy-agent/opa/v1/rego +github.com/open-policy-agent/opa/v1/resolver +github.com/open-policy-agent/opa/v1/resolver/wasm +github.com/open-policy-agent/opa/v1/schemas +github.com/open-policy-agent/opa/v1/storage +github.com/open-policy-agent/opa/v1/storage/inmem +github.com/open-policy-agent/opa/v1/storage/internal/errors +github.com/open-policy-agent/opa/v1/storage/internal/ptr +github.com/open-policy-agent/opa/v1/topdown +github.com/open-policy-agent/opa/v1/topdown/builtins +github.com/open-policy-agent/opa/v1/topdown/cache +github.com/open-policy-agent/opa/v1/topdown/copypropagation +github.com/open-policy-agent/opa/v1/topdown/print +github.com/open-policy-agent/opa/v1/tracing +github.com/open-policy-agent/opa/v1/types +github.com/open-policy-agent/opa/v1/util +github.com/open-policy-agent/opa/v1/util/decoding +github.com/open-policy-agent/opa/v1/version # github.com/opencontainers/runtime-spec v1.1.0 ## explicit github.com/opencontainers/runtime-spec/specs-go @@ -1649,8 +1656,8 @@ github.com/prometheus/alertmanager/matchers/parse github.com/prometheus/alertmanager/pkg/labels github.com/prometheus/alertmanager/template github.com/prometheus/alertmanager/types -# github.com/prometheus/client_golang v1.20.5 -## explicit; go 1.20 +# github.com/prometheus/client_golang v1.21.1 +## explicit; go 1.21 github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header github.com/prometheus/client_golang/prometheus @@ -1660,8 +1667,8 @@ github.com/prometheus/client_golang/prometheus/promhttp # github.com/prometheus/client_model v0.6.1 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.55.0 -## explicit; go 1.20 +# github.com/prometheus/common v0.62.0 +## explicit; go 1.21 github.com/prometheus/common/expfmt github.com/prometheus/common/model # github.com/prometheus/procfs v0.15.1 @@ -1806,15 +1813,15 @@ github.com/skeema/knownhosts # github.com/spacewander/go-suffix-tree v0.0.0-20191010040751-0865e368c784 ## explicit github.com/spacewander/go-suffix-tree -# github.com/spf13/afero v1.11.0 -## explicit; go 1.19 +# github.com/spf13/afero v1.12.0 +## explicit; go 1.21 github.com/spf13/afero github.com/spf13/afero/internal/common github.com/spf13/afero/mem -# github.com/spf13/cobra v1.8.1 +# github.com/spf13/cobra v1.9.1 ## explicit; go 1.15 github.com/spf13/cobra -# github.com/spf13/pflag v1.0.5 +# github.com/spf13/pflag v1.0.6 ## explicit; go 1.12 github.com/spf13/pflag # github.com/stretchr/objx v0.5.2 @@ -1829,7 +1836,7 @@ github.com/stretchr/testify/require # github.com/studio-b12/gowebdav v0.9.0 => github.com/kobergj/gowebdav v0.0.0-20250102091030-aa65266db202 ## explicit; go 1.17 github.com/studio-b12/gowebdav -# github.com/tchap/go-patricia/v2 v2.3.1 +# github.com/tchap/go-patricia/v2 v2.3.2 ## explicit; go 1.16 github.com/tchap/go-patricia/v2/patricia # github.com/test-go/testify v1.1.4 @@ -2009,22 +2016,26 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate +# go.opentelemetry.io/auto/sdk v1.1.0 +## explicit; go 1.22.0 +go.opentelemetry.io/auto/sdk +go.opentelemetry.io/auto/sdk/internal/telemetry # 
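The modules.txt reshuffle above is the visible cost of the semver-major jump this PR performs: OPA 1.x relocates its public Go packages under a /v1 path, and the vendor list swaps most top-level packages for their v1 counterparts (a few legacy top-level paths such as .../opa/ast and .../opa/rego remain, which OPA appears to keep as thin compatibility forwarders). A minimal consumer-side sketch of the new import path; only the path change is taken from the hunk, the query is illustrative:

package main

import (
	"context"
	"fmt"
	"log"

	// Before this bump: "github.com/open-policy-agent/opa/rego"
	"github.com/open-policy-agent/opa/v1/rego"
)

func main() {
	r := rego.New(rego.Query("x := 1 + 1"))
	rs, err := r.Eval(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(rs[0].Bindings["x"]) // 2
}

Note that OPA 1.0 also made Rego v1 the default policy syntax (if and contains become required on rules, among other changes), which for most consumers is a bigger migration item than the import paths.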
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.57.0 ## explicit; go 1.22.7 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 -## explicit; go 1.22 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 +## explicit; go 1.22.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/contrib/zpages v0.57.0 -## explicit; go 1.22 +# go.opentelemetry.io/contrib/zpages v0.60.0 +## explicit; go 1.22.0 go.opentelemetry.io/contrib/zpages go.opentelemetry.io/contrib/zpages/internal -# go.opentelemetry.io/otel v1.32.0 -## explicit; go 1.22 +# go.opentelemetry.io/otel v1.35.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute go.opentelemetry.io/otel/baggage @@ -2050,37 +2061,38 @@ go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 -## explicit; go 1.22 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 -## explicit; go 1.22 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry -# go.opentelemetry.io/otel/metric v1.32.0 -## explicit; go 1.22 +# go.opentelemetry.io/otel/metric v1.35.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.32.0 -## explicit; go 1.22 +# go.opentelemetry.io/otel/sdk v1.35.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation go.opentelemetry.io/otel/sdk/internal/env go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace -# go.opentelemetry.io/otel/trace v1.32.0 -## explicit; go 1.22 +# go.opentelemetry.io/otel/trace v1.35.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded +go.opentelemetry.io/otel/trace/internal/telemetry go.opentelemetry.io/otel/trace/noop -# go.opentelemetry.io/proto/otlp v1.3.1 -## explicit; go 1.17 +# go.opentelemetry.io/proto/otlp v1.5.0 +## explicit; go 1.22.0 go.opentelemetry.io/proto/otlp/collector/trace/v1 go.opentelemetry.io/proto/otlp/common/v1 go.opentelemetry.io/proto/otlp/resource/v1 @@ -2185,7 +2197,7 @@ 
golang.org/x/net/netutil golang.org/x/net/proxy golang.org/x/net/publicsuffix golang.org/x/net/trace -# golang.org/x/oauth2 v0.24.0 +# golang.org/x/oauth2 v0.26.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal @@ -2267,25 +2279,26 @@ golang.org/x/tools/internal/versions ## explicit; go 1.17 golang.org/x/xerrors golang.org/x/xerrors/internal -# google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 +# google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 ## explicit; go 1.21 google.golang.org/genproto/protobuf/field_mask -# google.golang.org/genproto/googleapis/api v0.0.0-20241118233622-e639e219e697 -## explicit; go 1.21 +# google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a +## explicit; go 1.22 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697 -## explicit; go 1.21 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a +## explicit; go 1.22 google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.68.0 -## explicit; go 1.22.7 +# google.golang.org/grpc v1.71.1 +## explicit; go 1.22.0 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff google.golang.org/grpc/balancer google.golang.org/grpc/balancer/base +google.golang.org/grpc/balancer/endpointsharding google.golang.org/grpc/balancer/grpclb/state google.golang.org/grpc/balancer/pickfirst google.golang.org/grpc/balancer/pickfirst/internal @@ -2320,7 +2333,9 @@ google.golang.org/grpc/internal/grpcutil google.golang.org/grpc/internal/idle google.golang.org/grpc/internal/metadata google.golang.org/grpc/internal/pretty +google.golang.org/grpc/internal/proxyattributes google.golang.org/grpc/internal/resolver +google.golang.org/grpc/internal/resolver/delegatingresolver google.golang.org/grpc/internal/resolver/dns google.golang.org/grpc/internal/resolver/dns/internal google.golang.org/grpc/internal/resolver/passthrough @@ -2346,8 +2361,8 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.36.5 -## explicit; go 1.21 +# google.golang.org/protobuf v1.36.6 +## explicit; go 1.22 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext