diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md index 82eb203faafba..7e507081cfe64 100644 --- a/docs/sources/shared/configuration.md +++ b/docs/sources/shared/configuration.md @@ -226,15 +226,15 @@ ui: # CLI flag: -ui.ring.multi.secondary [secondary: | default = ""] - # Mirror writes to secondary store. + # Mirror writes to the secondary store. # CLI flag: -ui.ring.multi.mirror-enabled [mirror_enabled: | default = false] - # Timeout for storing value to secondary store. + # Timeout for storing a value to the secondary store. # CLI flag: -ui.ring.multi.mirror-timeout [mirror_timeout: | default = 2s] - # Period at which to heartbeat to the ring. 0 = disabled. + # Period at which to heartbeat to the ring. # CLI flag: -ui.ring.heartbeat-period [heartbeat_period: | default = 15s] @@ -451,16 +451,16 @@ pattern_ingester: # CLI flag: -pattern-ingester.multi.secondary [secondary: | default = ""] - # Mirror writes to secondary store. + # Mirror writes to the secondary store. # CLI flag: -pattern-ingester.multi.mirror-enabled [mirror_enabled: | default = false] - # Timeout for storing value to secondary store. + # Timeout for storing a value to the secondary store. # CLI flag: -pattern-ingester.multi.mirror-timeout [mirror_timeout: | default = 2s] # The heartbeat timeout after which ingesters are skipped for - # reads/writes. 0 = never (timeout disabled). + # reads/writes. # CLI flag: -pattern-ingester.ring.heartbeat-timeout [heartbeat_timeout: | default = 1m] @@ -482,12 +482,11 @@ pattern_ingester: # CLI flag: -pattern-ingester.num-tokens [num_tokens: | default = 128] - # Period at which to heartbeat to consul. 0 = disabled. + # Period at which to heartbeat to consul. # CLI flag: -pattern-ingester.heartbeat-period [heartbeat_period: | default = 5s] - # Heartbeat timeout after which instance is assumed to be unhealthy. 0 = - # disabled. + # Heartbeat timeout after which instance is assumed to be unhealthy. # CLI flag: -pattern-ingester.heartbeat-timeout [heartbeat_timeout: | default = 1m] @@ -1249,16 +1248,16 @@ dataobj: # CLI flag: -dataobj-consumer.multi.secondary [secondary: | default = ""] - # Mirror writes to secondary store. + # Mirror writes to the secondary store. # CLI flag: -dataobj-consumer.multi.mirror-enabled [mirror_enabled: | default = false] - # Timeout for storing value to secondary store. + # Timeout for storing a value to the secondary store. # CLI flag: -dataobj-consumer.multi.mirror-timeout [mirror_timeout: | default = 2s] # The heartbeat timeout after which ingesters are skipped for - # reads/writes. 0 = never (timeout disabled). + # reads/writes. # CLI flag: -dataobj-consumer.ring.heartbeat-timeout [heartbeat_timeout: | default = 1m] @@ -1280,12 +1279,11 @@ dataobj: # CLI flag: -dataobj-consumer.num-tokens [num_tokens: | default = 128] - # Period at which to heartbeat to consul. 0 = disabled. + # Period at which to heartbeat to consul. # CLI flag: -dataobj-consumer.heartbeat-period [heartbeat_period: | default = 5s] - # Heartbeat timeout after which instance is assumed to be unhealthy. 0 = - # disabled. + # Heartbeat timeout after which instance is assumed to be unhealthy. # CLI flag: -dataobj-consumer.heartbeat-timeout [heartbeat_timeout: | default = 1m] @@ -1390,11 +1388,11 @@ dataobj: # CLI flag: -dataobj-consumer.partition-ring.multi.secondary [secondary: | default = ""] - # Mirror writes to secondary store. + # Mirror writes to the secondary store. 
# CLI flag: -dataobj-consumer.partition-ring.multi.mirror-enabled [mirror_enabled: | default = false] - # Timeout for storing value to secondary store. + # Timeout for storing a value to the secondary store. # CLI flag: -dataobj-consumer.partition-ring.multi.mirror-timeout [mirror_timeout: | default = 2s] @@ -1556,16 +1554,16 @@ ingest_limits: # CLI flag: -ingest-limits.multi.secondary [secondary: | default = ""] - # Mirror writes to secondary store. + # Mirror writes to the secondary store. # CLI flag: -ingest-limits.multi.mirror-enabled [mirror_enabled: | default = false] - # Timeout for storing value to secondary store. + # Timeout for storing a value to the secondary store. # CLI flag: -ingest-limits.multi.mirror-timeout [mirror_timeout: | default = 2s] # The heartbeat timeout after which ingesters are skipped for - # reads/writes. 0 = never (timeout disabled). + # reads/writes. # CLI flag: -ingest-limits.ring.heartbeat-timeout [heartbeat_timeout: | default = 1m] @@ -1587,12 +1585,11 @@ ingest_limits: # CLI flag: -ingest-limits.num-tokens [num_tokens: | default = 128] - # Period at which to heartbeat to consul. 0 = disabled. + # Period at which to heartbeat to consul. # CLI flag: -ingest-limits.heartbeat-period [heartbeat_period: | default = 5s] - # Heartbeat timeout after which instance is assumed to be unhealthy. 0 = - # disabled. + # Heartbeat timeout after which instance is assumed to be unhealthy. # CLI flag: -ingest-limits.heartbeat-timeout [heartbeat_timeout: | default = 1m] @@ -1726,16 +1723,16 @@ ingest_limits_frontend: # CLI flag: -ingest-limits-frontend.multi.secondary [secondary: | default = ""] - # Mirror writes to secondary store. + # Mirror writes to the secondary store. # CLI flag: -ingest-limits-frontend.multi.mirror-enabled [mirror_enabled: | default = false] - # Timeout for storing value to secondary store. + # Timeout for storing a value to the secondary store. # CLI flag: -ingest-limits-frontend.multi.mirror-timeout [mirror_timeout: | default = 2s] # The heartbeat timeout after which ingesters are skipped for - # reads/writes. 0 = never (timeout disabled). + # reads/writes. # CLI flag: -ingest-limits-frontend.ring.heartbeat-timeout [heartbeat_timeout: | default = 1m] @@ -1757,12 +1754,11 @@ ingest_limits_frontend: # CLI flag: -ingest-limits-frontend.num-tokens [num_tokens: | default = 128] - # Period at which to heartbeat to consul. 0 = disabled. + # Period at which to heartbeat to consul. # CLI flag: -ingest-limits-frontend.heartbeat-period [heartbeat_period: | default = 5s] - # Heartbeat timeout after which instance is assumed to be unhealthy. 0 = - # disabled. + # Heartbeat timeout after which instance is assumed to be unhealthy. # CLI flag: -ingest-limits-frontend.heartbeat-timeout [heartbeat_timeout: | default = 1m] @@ -2701,15 +2697,15 @@ ring: # CLI flag: -common.storage.ring.multi.secondary [secondary: | default = ""] - # Mirror writes to secondary store. + # Mirror writes to the secondary store. # CLI flag: -common.storage.ring.multi.mirror-enabled [mirror_enabled: | default = false] - # Timeout for storing value to secondary store. + # Timeout for storing a value to the secondary store. # CLI flag: -common.storage.ring.multi.mirror-timeout [mirror_timeout: | default = 2s] - # Period at which to heartbeat to the ring. 0 = disabled. + # Period at which to heartbeat to the ring. 
# CLI flag: -common.storage.ring.heartbeat-period [heartbeat_period: | default = 15s] @@ -2907,15 +2903,15 @@ compactor_ring: # CLI flag: -compactor.ring.multi.secondary [secondary: | default = ""] - # Mirror writes to secondary store. + # Mirror writes to the secondary store. # CLI flag: -compactor.ring.multi.mirror-enabled [mirror_enabled: | default = false] - # Timeout for storing value to secondary store. + # Timeout for storing a value to the secondary store. # CLI flag: -compactor.ring.multi.mirror-timeout [mirror_timeout: | default = 2s] - # Period at which to heartbeat to the ring. 0 = disabled. + # Period at which to heartbeat to the ring. # CLI flag: -compactor.ring.heartbeat-period [heartbeat_period: | default = 15s] @@ -3177,15 +3173,15 @@ ring: # CLI flag: -distributor.ring.multi.secondary [secondary: | default = ""] - # Mirror writes to secondary store. + # Mirror writes to the secondary store. # CLI flag: -distributor.ring.multi.mirror-enabled [mirror_enabled: | default = false] - # Timeout for storing value to secondary store. + # Timeout for storing a value to the secondary store. # CLI flag: -distributor.ring.multi.mirror-timeout [mirror_timeout: | default = 2s] - # Period at which to heartbeat to the ring. 0 = disabled. + # Period at which to heartbeat to the ring. # CLI flag: -distributor.ring.heartbeat-period [heartbeat_period: | default = 5s] @@ -3631,7 +3627,7 @@ backoff_config: [connect_backoff_max_delay: | default = 5s] cluster_validation: - # Optionally define the cluster validation label. + # Primary cluster validation label. # CLI flag: -.cluster-validation.label [label: | default = ""] ``` @@ -3684,15 +3680,15 @@ ring: # CLI flag: -index-gateway.ring.multi.secondary [secondary: | default = ""] - # Mirror writes to secondary store. + # Mirror writes to the secondary store. # CLI flag: -index-gateway.ring.multi.mirror-enabled [mirror_enabled: | default = false] - # Timeout for storing value to secondary store. + # Timeout for storing a value to the secondary store. # CLI flag: -index-gateway.ring.multi.mirror-timeout [mirror_timeout: | default = 2s] - # Period at which to heartbeat to the ring. 0 = disabled. + # Period at which to heartbeat to the ring. # CLI flag: -index-gateway.ring.heartbeat-period [heartbeat_period: | default = 15s] @@ -3779,16 +3775,15 @@ lifecycler: # CLI flag: -multi.secondary [secondary: | default = ""] - # Mirror writes to secondary store. + # Mirror writes to the secondary store. # CLI flag: -multi.mirror-enabled [mirror_enabled: | default = false] - # Timeout for storing value to secondary store. + # Timeout for storing a value to the secondary store. # CLI flag: -multi.mirror-timeout [mirror_timeout: | default = 2s] # The heartbeat timeout after which ingesters are skipped for reads/writes. - # 0 = never (timeout disabled). # CLI flag: -ring.heartbeat-timeout [heartbeat_timeout: | default = 1m] @@ -3810,12 +3805,11 @@ lifecycler: # CLI flag: -ingester.num-tokens [num_tokens: | default = 128] - # Period at which to heartbeat to consul. 0 = disabled. + # Period at which to heartbeat to consul. # CLI flag: -ingester.heartbeat-period [heartbeat_period: | default = 5s] - # Heartbeat timeout after which instance is assumed to be unhealthy. 0 = - # disabled. + # Heartbeat timeout after which instance is assumed to be unhealthy. 
# CLI flag: -ingester.heartbeat-timeout [heartbeat_timeout: | default = 1m] @@ -4073,11 +4067,11 @@ kafka_ingestion: # CLI flag: -ingester.partition-ring.multi.secondary [secondary: | default = ""] - # Mirror writes to secondary store. + # Mirror writes to the secondary store. # CLI flag: -ingester.partition-ring.multi.mirror-enabled [mirror_enabled: | default = false] - # Timeout for storing value to secondary store. + # Timeout for storing a value to the secondary store. # CLI flag: -ingester.partition-ring.multi.mirror-timeout [mirror_timeout: | default = 2s] @@ -5085,6 +5079,19 @@ When a memberlist config with atleast 1 join_members is defined, kvstore of type # The TLS configuration. # The CLI flags prefix for this block configuration is: memberlist [] + +zone_aware_routing: + # Enable zone-aware routing for memberlist gossip. + # CLI flag: -memberlist.zone-aware-routing.enabled + [enabled: | default = false] + + # Availability zone where this node is running. + # CLI flag: -memberlist.zone-aware-routing.instance-availability-zone + [instance_availability_zone: | default = ""] + + # Role of this node in the cluster. Valid values: member, bridge. + # CLI flag: -memberlist.zone-aware-routing.role + [role: | default = "member"] ``` ### named_stores_config @@ -5504,15 +5511,15 @@ scheduler_ring: # CLI flag: -query-scheduler.ring.multi.secondary [secondary: | default = ""] - # Mirror writes to secondary store. + # Mirror writes to the secondary store. # CLI flag: -query-scheduler.ring.multi.mirror-enabled [mirror_enabled: | default = false] - # Timeout for storing value to secondary store. + # Timeout for storing a value to the secondary store. # CLI flag: -query-scheduler.ring.multi.mirror-timeout [mirror_timeout: | default = 2s] - # Period at which to heartbeat to the ring. 0 = disabled. + # Period at which to heartbeat to the ring. # CLI flag: -query-scheduler.ring.heartbeat-period [heartbeat_period: | default = 15s] @@ -5753,15 +5760,15 @@ ring: # CLI flag: -ruler.ring.multi.secondary [secondary: | default = ""] - # Mirror writes to secondary store. + # Mirror writes to the secondary store. # CLI flag: -ruler.ring.multi.mirror-enabled [mirror_enabled: | default = false] - # Timeout for storing value to secondary store. + # Timeout for storing a value to the secondary store. # CLI flag: -ruler.ring.multi.mirror-timeout [mirror_timeout: | default = 2s] - # Interval between heartbeats sent to the ring. 0 = disabled. + # Interval between heartbeats sent to the ring. # CLI flag: -ruler.ring.heartbeat-period [heartbeat_period: | default = 5s] @@ -6309,10 +6316,15 @@ grpc_tls_config: [http_path_prefix: | default = ""] cluster_validation: - # Optionally define the cluster validation label. + # Primary cluster validation label. # CLI flag: -server.cluster-validation.label [label: | default = ""] + # Comma-separated list of additional cluster validation labels that the server + # will accept from incoming requests. 
+ # CLI flag: -server.cluster-validation.additional-labels + [additional_labels: | default = ""] + grpc: # When enabled, cluster label validation is executed: configured cluster # validation label is compared with the cluster validation label received diff --git a/go.mod b/go.mod index 4f22bc53f6452..0a23c90452ee2 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,8 @@ go 1.25.3 toolchain go1.25.4 +ignore ./tools/dev + require ( cloud.google.com/go/bigtable v1.40.1 cloud.google.com/go/pubsub/v2 v2.3.0 @@ -53,9 +55,9 @@ require ( github.com/gorilla/mux v1.8.1 github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 - github.com/grafana/dskit v0.0.0-20250917065751-798f5a8fa154 + github.com/grafana/dskit v0.0.0-20251210115601-41c7cf07196b github.com/grafana/go-gelf/v2 v2.0.1 - github.com/grafana/gomemcache v0.0.0-20250828162811-a96f6acee2fe + github.com/grafana/gomemcache v0.0.0-20251127154401-74f93547077b github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853 github.com/grafana/tail v0.0.0-20230510142333-77b18831edf0 github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3 @@ -179,7 +181,6 @@ require ( github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.54.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.54.0 // indirect - github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect github.com/andybalholm/brotli v1.2.0 // indirect github.com/atotto/clipboard v0.1.4 // indirect github.com/aws/aws-sdk-go v1.55.7 // indirect @@ -235,6 +236,7 @@ require ( github.com/grafana/otel-profiling-go v0.5.1 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect github.com/hashicorp/go-metrics v0.5.4 // indirect + github.com/hashicorp/go-msgpack/v2 v2.1.2 // indirect github.com/hashicorp/go-version v1.7.0 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/jaegertracing/jaeger-idl v0.5.0 // indirect @@ -267,7 +269,7 @@ require ( github.com/parquet-go/bitpack v0.2.0 // indirect github.com/parquet-go/jsonlite v0.8.1 // indirect github.com/philhofer/fwd v1.2.0 // indirect - github.com/pires/go-proxyproto v0.7.0 // indirect + github.com/pires/go-proxyproto v0.8.1 // indirect github.com/pkg/xattr v0.4.12 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect @@ -276,7 +278,7 @@ require ( github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/sahilm/fuzzy v0.1.1 // indirect - github.com/sercand/kuberesolver/v6 v6.0.0 // indirect + github.com/sercand/kuberesolver/v6 v6.0.1 // indirect github.com/sethvargo/go-retry v0.3.0 // indirect github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect github.com/tinylib/msgp v1.5.0 // indirect @@ -295,31 +297,31 @@ require ( go.opentelemetry.io/collector/featuregate v1.47.0 // indirect go.opentelemetry.io/collector/pipeline v1.45.0 // indirect go.opentelemetry.io/collector/processor v1.45.0 // indirect - go.opentelemetry.io/contrib/bridges/prometheus v0.61.0 // indirect + go.opentelemetry.io/contrib/bridges/prometheus v0.63.0 // indirect go.opentelemetry.io/contrib/detectors/gcp v1.38.0 // indirect - go.opentelemetry.io/contrib/exporters/autoexport v0.61.0 // indirect - 
go.opentelemetry.io/contrib/propagators/jaeger v1.35.0 // indirect - go.opentelemetry.io/contrib/samplers/jaegerremote v0.30.0 // indirect + go.opentelemetry.io/contrib/exporters/autoexport v0.63.0 // indirect + go.opentelemetry.io/contrib/propagators/jaeger v1.38.0 // indirect + go.opentelemetry.io/contrib/samplers/jaegerremote v0.32.0 // indirect go.opentelemetry.io/otel/exporters/jaeger v1.17.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.14.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect - go.opentelemetry.io/otel/exporters/prometheus v0.58.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.60.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 // indirect go.opentelemetry.io/otel/log v0.14.0 // indirect - go.opentelemetry.io/otel/sdk/log v0.12.2 // indirect + go.opentelemetry.io/otel/sdk/log v0.14.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect go.opentelemetry.io/proto/otlp v1.9.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect - golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 // indirect + golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39 // indirect + golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect modernc.org/libc v1.66.10 // indirect @@ -411,12 +413,11 @@ require ( github.com/google/s2a-go v0.1.9 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect github.com/googleapis/gax-go/v2 v2.15.0 // indirect - github.com/grafana/pyroscope-go/godeltaprof v0.1.8 // indirect + github.com/grafana/pyroscope-go/godeltaprof v0.1.9 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-hclog v1.6.3 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect - github.com/hashicorp/go-msgpack v0.5.5 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-retryablehttp v0.7.8 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect @@ -467,8 +468,8 @@ require ( github.com/xdg-go/stringprep v1.0.4 // indirect github.com/yuin/gopher-lua v1.1.1 // indirect go.etcd.io/etcd/api/v3 v3.6.6 // indirect - 
go.etcd.io/etcd/client/pkg/v3 v3.5.4 // indirect - go.etcd.io/etcd/client/v3 v3.5.4 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.6.6 // indirect + go.etcd.io/etcd/client/v3 v3.6.6 // indirect go.mongodb.org/mongo-driver v1.17.6 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/collector/semconv v0.128.0 // indirect @@ -479,9 +480,9 @@ require ( go.opentelemetry.io/otel/trace v1.38.0 go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/mod v0.29.0 // indirect + golang.org/x/mod v0.30.0 // indirect golang.org/x/term v0.37.0 // indirect - golang.org/x/tools v0.38.0 // indirect + golang.org/x/tools v0.39.0 // indirect google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20251111163417-95abcf5c77ba // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20251124214823-79d6a2a48846 @@ -507,7 +508,7 @@ exclude k8s.io/client-go v8.0.0+incompatible // Replace memberlist with our fork which includes some fixes that haven't been // merged upstream yet. -replace github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe +replace github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20251126142931-6f9f62ab6f86 // Insist on the optimised version of grafana/regexp replace github.com/grafana/regexp => github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc diff --git a/go.sum b/go.sum index 608653d8df02e..f10b4f1293ac1 100644 --- a/go.sum +++ b/go.sum @@ -132,8 +132,8 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0 github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.54.0/go.mod h1:vB2GH9GAYYJTO3mEn8oYwzEdhlayZIdQz6zdzgUIRvA= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.54.0 h1:s0WlVbf9qpvkh1c/uDAPElam0WrL7fHRIidgZJ7UqZI= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.54.0/go.mod h1:Mf6O40IAyB9zR/1J8nGDDPirZQQPbYJni8Yisy7NTMc= -github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= -github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/HdrHistogram/hdrhistogram-go v1.2.0 h1:XMJkDWuz6bM9Fzy7zORuVFKH7ZJY41G2q8KWhVGkNiY= +github.com/HdrHistogram/hdrhistogram-go v1.2.0/go.mod h1:CiIeGiHSd06zjX+FypuEJ5EQ07KKtxZ+8J6hszwVQig= github.com/IBM/go-sdk-core/v5 v5.21.2 h1:mJ5QbLPOm4g5qhZiVB6wbSllfpeUExftGoyPek2hk4M= github.com/IBM/go-sdk-core/v5 v5.21.2/go.mod h1:ngpMgwkjur1VNUjqn11LPk3o5eCyOCRbcfg/0YAY7Hc= github.com/IBM/ibm-cos-sdk-go v1.12.4 h1:XUxGUQoqNH6Cpa/eSItbRhVRdmDIsd9jCTMeTVh1siE= @@ -154,7 +154,6 @@ github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cq github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/Workiva/go-datastructures v1.1.7 h1:q5RXlAeKm3zDpZTbYXwdMb1gN9RtGSvOCtPXGJJL6Cs= github.com/Workiva/go-datastructures v1.1.7/go.mod h1:1yZL+zfsztete+ePzZz/Zb1/t5BnDuE2Ya2MMGhzP6A= -github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0= github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= github.com/alecthomas/chroma/v2 v2.20.0 h1:sfIHpxPyR07/Oylvmcai3X/exDlE8+FA820NTz+9sGw= @@ -176,14 +175,12 @@ 
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible h1:8psS8a+wKfiLt1iVDX79F github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ= github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow-go/v18 v18.4.1 h1:q/jVkBWCJOB9reDgaIZIdruLQUb1kbkvOnOFezVH1C4= github.com/apache/arrow-go/v18 v18.4.1/go.mod h1:tLyFubsAl17bvFdUAy24bsSvA/6ww95Iqi67fTpGu3E= github.com/apache/cassandra-gocql-driver/v2 v2.0.0 h1:Omnzb1Z/P90Dr2TbVNu54ICQL7TKVIIsJO231w484HU= github.com/apache/cassandra-gocql-driver/v2 v2.0.0/go.mod h1:QH/asJjB3mHvY6Dot6ZKMMpTcOrWJ8i9GhsvG1g0PK4= github.com/apache/thrift v0.22.0 h1:r7mTJdj51TMDe6RtcmNdQxgn9XcyfGDOzegMDRg47uc= github.com/apache/thrift v0.22.0/go.mod h1:1e7J/O1Ae6ZQMTYdy9xa3w9k+XHWPfRvdPyJeynQ+/g= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= @@ -312,7 +309,6 @@ github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEX github.com/clipperhouse/uax29/v2 v2.3.0 h1:SNdx9DVUqMoBuBoW3iLOj4FQv3dN5mDtuqwuhIGpJy4= github.com/clipperhouse/uax29/v2 v2.3.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f h1:Y8xYupdHxryycyPlc9Y+bSQAYZnetRJ70VMVKm5CKI0= github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f/go.mod h1:HlzOvOjVBOfTGSRXRyY0OiCS/3J1akRGQQpRO/7zyF4= github.com/coder/quartz v0.3.0 h1:bUoSEJ77NBfKtUqv6CPSC0AS8dsjqAqqAv7bN02m1mg= @@ -327,15 +323,12 @@ github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo= github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod 
h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cristalhq/hedgedhttp v0.9.1 h1:g68L9cf8uUyQKQJwciD0A1Vgbsz+QgCjuB1I8FAsCDs= @@ -376,7 +369,6 @@ github.com/dolthub/swiss v0.2.1 h1:gs2osYs5SJkAaH5/ggVJqXQxRXtWshF6uE0lgR/Y3Gw= github.com/dolthub/swiss v0.2.1/go.mod h1:8AhKZZ1HK7g18j7v7k6c5cYIGEZJcPn0ARsai8cUrh0= github.com/drone/envsubst v1.0.3 h1:PCIBwNDYjs50AsLZPYdfhSATKaRg/FJmDc2D6+C2x8g= github.com/drone/envsubst v1.0.3/go.mod h1:N2jZmlMufstn1KEqvbHjw40h1KyTmnVzHcSc9bFiJ2g= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/eapache/go-resiliency v1.7.0 h1:n3NRTnBn5N0Cbi/IeOHuQn9s2UwVUH7Ga0ZWcP+9JTA= @@ -398,7 +390,6 @@ github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRr github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329 h1:K+fnvUM0VZ7ZFJf0n4L/BRlnsb9pL/GuDG6FqaH+PwM= github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329/go.mod h1:Alz8LEClvR7xKsrq3qzoc4N0guvVNSS8KmSChGYr9hs= github.com/envoyproxy/go-control-plane/envoy v1.35.0 h1:ixjkELDE+ru6idPxcHLj8LBVc2bFP7iBytj353BoHUo= @@ -421,7 +412,6 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fluent/fluent-bit-go v0.0.0-20230731091245-a7a013e2473c h1:yKN46XJHYC/gvgH2UsisJ31+n4K3S7QYZSfU2uAWjuI= github.com/fluent/fluent-bit-go v0.0.0-20230731091245-a7a013e2473c/go.mod h1:L92h+dgwElEyUuShEwjbiHjseW410WIcNz+Bjutc8YQ= -github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= @@ -438,7 +428,6 @@ github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sa github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/gabriel-vasile/mimetype v1.4.11 h1:AQvxbp830wPhHTqc1u7nzoLT+ZFxGY7emj5DR5DYFik= github.com/gabriel-vasile/mimetype v1.4.11/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -535,7 +524,6 @@ github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6Wezm github.com/gobwas/ws 
v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= @@ -554,7 +542,6 @@ github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXe github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= -github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -583,7 +570,6 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= @@ -658,30 +644,28 @@ github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5T github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 h1:qhugDMdQ4Vp68H0tp/0iN17DM2ehRo1rLEdOFe/gB8I= github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2/go.mod h1:w/aiO1POVIeXUQyl0VQSZjl5OAGDTL5aX+4v0RA1tcw= -github.com/grafana/dskit v0.0.0-20250917065751-798f5a8fa154 h1:ojrJ/ctyUGsZ/gem0o6hnhe+keaZhMVq4cg/1kPALbE= -github.com/grafana/dskit v0.0.0-20250917065751-798f5a8fa154/go.mod h1:VahT+GtfQIM+o8ht2StR6J9g+Ef+C2Vokh5uuSmOD/4= +github.com/grafana/dskit v0.0.0-20251210115601-41c7cf07196b h1:9O3CM9FvBOWWlwHjXMmRmbR1duNKqA1xJn27GAfbGTM= +github.com/grafana/dskit v0.0.0-20251210115601-41c7cf07196b/go.mod h1:YXa5jxR6Ls9nhYkg7x0GbrZDXmkxZVDeyBrKxQ8OKZU= github.com/grafana/go-gelf/v2 v2.0.1 h1:BOChP0h/jLeD+7F9mL7tq10xVkDG15he3T1zHuQaWak= github.com/grafana/go-gelf/v2 v2.0.1/go.mod h1:lexHie0xzYGwCgiRGcvZ723bSNyNI8ZRD4s0CLobh90= -github.com/grafana/gomemcache v0.0.0-20250828162811-a96f6acee2fe h1:q+QaVANzNZxvTovycpQvDTfsNZ2rHh4XIIaccMnrIR4= -github.com/grafana/gomemcache v0.0.0-20250828162811-a96f6acee2fe/go.mod h1:j/s0jkda4UXTemDs7Pgw/vMT06alWc42CHisvYac0qw= +github.com/grafana/gomemcache v0.0.0-20251127154401-74f93547077b h1:5qp8/5YPt/Z2RW5QHsxvwE05+LWQYIXydP2MwOkMfb8= 
+github.com/grafana/gomemcache v0.0.0-20251127154401-74f93547077b/go.mod h1:j/s0jkda4UXTemDs7Pgw/vMT06alWc42CHisvYac0qw= github.com/grafana/jsonparser v0.0.0-20241004153430-023329977675 h1:U94jQ2TQr1m3HNyE8efSdyaBbDrdPaWImXyenuKZ/nw= github.com/grafana/jsonparser v0.0.0-20241004153430-023329977675/go.mod h1:796sq+UcONnSlzA3RtlBZ+b/hrerkZXiEmO8oMjyRwY= -github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe h1:yIXAAbLswn7VNWBIvM71O2QsgfgW9fRXZNR0DXe6pDU= -github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/grafana/memberlist v0.3.1-0.20251126142931-6f9f62ab6f86 h1:aTwfQuroOmOr//QEn9J1MtC4R4CPR9/IbUd8hZrbWKo= +github.com/grafana/memberlist v0.3.1-0.20251126142931-6f9f62ab6f86/go.mod h1:h60o12SZn/ua/j0B6iKAZezA4eDaGsIuPO70eOaJ6WE= github.com/grafana/objstore v0.0.0-20250728171719-b5d3b766d1b0 h1:IXRV9gFgzmWIfsFf7ZDQmo94BvDBFGJZ/+yLQNpObIo= github.com/grafana/objstore v0.0.0-20250728171719-b5d3b766d1b0/go.mod h1:Quz9HUDjGidU0RQpoytzK4KqJ7kwzP+DMAm4K57/usM= github.com/grafana/otel-profiling-go v0.5.1 h1:stVPKAFZSa7eGiqbYuG25VcqYksR6iWvF3YH66t4qL8= github.com/grafana/otel-profiling-go v0.5.1/go.mod h1:ftN/t5A/4gQI19/8MoWurBEtC6gFw8Dns1sJZ9W4Tls= -github.com/grafana/pyroscope-go/godeltaprof v0.1.8 h1:iwOtYXeeVSAeYefJNaxDytgjKtUuKQbJqgAIjlnicKg= -github.com/grafana/pyroscope-go/godeltaprof v0.1.8/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU= +github.com/grafana/pyroscope-go/godeltaprof v0.1.9 h1:c1Us8i6eSmkW+Ez05d3co8kasnuOY813tbMN8i/a3Og= +github.com/grafana/pyroscope-go/godeltaprof v0.1.9/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/grafana/tail v0.0.0-20230510142333-77b18831edf0 h1:bjh0PVYSVVFxzINqPFYJmAmJNrWPgnVjuSdYJGHmtFU= github.com/grafana/tail v0.0.0-20230510142333-77b18831edf0/go.mod h1:7t5XR+2IA8P2qggOAHTj/GCZfoLBle3OvNSYh1VkRBU= github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3 h1:B+8ClL/kCQkRiU82d9xajRPKYMrB7E0MbtzWVi1K4ns= github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3/go.mod h1:NbCUVmiS4foBGBHOYlCT25+YmGpJ32dZPi75pGEUpj4= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= github.com/hashicorp/consul/api v1.33.0 h1:MnFUzN1Bo6YDGi/EsRLbVNgA4pyCymmcswrE5j4OHBM= @@ -703,12 +687,8 @@ github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJ github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-metrics v0.5.4 h1:8mmPiIJkTPPEbAiV97IxdAGNdRdaWwVap1BU6elejKY= github.com/hashicorp/go-metrics v0.5.4/go.mod h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv4lxZ350i+QQI= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= -github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack/v2 v2.1.2 
h1:4Ee8FTp834e+ewB71RDrQ0VKpyFdrKOjvYtnQ/ltVj0= github.com/hashicorp/go-msgpack/v2 v2.1.2/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= @@ -716,7 +696,6 @@ github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVU github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw= github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -788,7 +767,6 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/kamstrup/intmap v0.5.1 h1:ENGAowczZA+PJPYYlreoqJvWgQVtAmX1l899WfYFVK0= github.com/kamstrup/intmap v0.5.1/go.mod h1:gWUVWHKzWj8xpJVFf5GC0O26bWmv3GqdnIX/LMT6Aq4= github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU= @@ -864,7 +842,6 @@ github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ= github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE= github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY= github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg= -github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA= github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs= @@ -928,7 +905,6 @@ github.com/ncw/swift v1.0.53 h1:luHjjTNtekIEvHg5KdAFIBaH7bWfNkefwFnpDffSIks= github.com/ncw/swift v1.0.53/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/ncw/swift/v2 v2.0.5 h1:9o5Gsd7bInAFEqsGPcaUdsboMbqf8lnNtxqWKFT9iz8= github.com/ncw/swift/v2 v2.0.5/go.mod h1:cbAO76/ZwcFrFlHdXPjaqWZ9R7Hdar7HpjRXBfbjigk= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/run v1.2.0 h1:O8x3yXwah4A73hJdlrwo/2X6J62gE5qTMusH0dvz60E= @@ -975,7 +951,6 @@ github.com/parquet-go/jsonlite v0.8.1 
h1:TdvfyPaVLTlz/Zsl+amWO4h0tpEwXwRkd7xa4iP github.com/parquet-go/jsonlite v0.8.1/go.mod h1:nDjpkpL4EOtqs6NQugUsi0Rleq9sW/OtC1NnZEnxzF0= github.com/parquet-go/parquet-go v0.26.0 h1:5rWuYYCKouRlo1kLihNAcw2+mb/OLJhIZjjpFu1lX9k= github.com/parquet-go/parquet-go v0.26.0/go.mod h1:7K8PVhWjeOLCtcV0cT3DFMfegbcM9uwvVNc2F+Cmsw4= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= @@ -984,8 +959,8 @@ github.com/philhofer/fwd v1.2.0 h1:e6DnBTl7vGY+Gz322/ASL4Gyp1FspeMvx1RNDoToZuM= github.com/philhofer/fwd v1.2.0/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pires/go-proxyproto v0.7.0 h1:IukmRewDQFWC7kfnb66CSomk2q/seBuilHBYFwyq0Hs= -github.com/pires/go-proxyproto v0.7.0/go.mod h1:Vz/1JPY/OACxWGQNIRY2BeyDmpoaWmEP40O9LbuiFR4= +github.com/pires/go-proxyproto v0.8.1 h1:9KEixbdJfhrbtjpz/ZwCdWDD2Xem0NZ38qMYaASJgp0= +github.com/pires/go-proxyproto v0.8.1/go.mod h1:ZKAAyp3cgy5Y5Mo4n9AlScrkCZwUy0g3Jf+slqQVcuU= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1060,7 +1035,6 @@ github.com/richardartoul/molecule v1.0.0 h1:+LFA9cT7fn8KF39zy4dhOnwcOwRoqKiBkPqK github.com/richardartoul/molecule v1.0.0/go.mod h1:uvX/8buq8uVeiZiFht+0lqSLBHF+uGV8BrTv8W/SIwk= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= @@ -1078,8 +1052,8 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUt github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtrmhM= github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY= -github.com/sercand/kuberesolver/v6 v6.0.0 h1:ScvS2Ga9snVkpOahln/BCLySr3/iBAHJf25u66DweZ0= -github.com/sercand/kuberesolver/v6 v6.0.0/go.mod h1:Dxkqms3OJadP5zirIBPLi9FV8Qpys3T3w40XPEcVsu0= +github.com/sercand/kuberesolver/v6 v6.0.1 h1:XZUTA0gy/lgDYp/UhEwv7Js24F1j8NJ833QrWv0Xux4= +github.com/sercand/kuberesolver/v6 v6.0.1/go.mod h1:C0tsTuRMONSY+Xf7pv7RMW1/JlewY1+wS8SZE+1lf1s= github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE= github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas= github.com/shirou/gopsutil/v4 v4.25.11 h1:X53gB7muL9Gnwwo2evPSE+SfOrltMoR6V3xJAXZILTY= @@ -1183,7 +1157,6 @@ 
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= @@ -1197,13 +1170,12 @@ go.einride.tech/aip v0.73.0 h1:bPo4oqBo2ZQeBKo4ZzLb1kxYXTY1ysJhpvQyfuGzvps= go.einride.tech/aip v0.73.0/go.mod h1:Mj7rFbmXEgw0dq1dqJ7JGMvYCZZVxmGOR3S4ZcV5LvQ= go.etcd.io/bbolt v1.4.3 h1:dEadXpI6G79deX5prL3QRNP6JB8UxVkqo4UPnHaNXJo= go.etcd.io/bbolt v1.4.3/go.mod h1:tKQlpPaYCVFctUIgFKFnAlvbmB3tpy1vkTnDWohtc0E= -go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= go.etcd.io/etcd/api/v3 v3.6.6 h1:mcaMp3+7JawWv69p6QShYWS8cIWUOl32bFLb6qf8pOQ= go.etcd.io/etcd/api/v3 v3.6.6/go.mod h1:f/om26iXl2wSkcTA1zGQv8reJRSLVdoEBsi4JdfMrx4= -go.etcd.io/etcd/client/pkg/v3 v3.5.4 h1:lrneYvz923dvC14R54XcA7FXoZ3mlGZAgmwhfm7HqOg= -go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v3 v3.5.4 h1:p83BUL3tAYS0OT/r0qglgc3M1JjhM0diV8DSWAhVXv4= -go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= +go.etcd.io/etcd/client/pkg/v3 v3.6.6 h1:uoqgzSOv2H9KlIF5O1Lsd8sW+eMLuV6wzE3q5GJGQNs= +go.etcd.io/etcd/client/pkg/v3 v3.6.6/go.mod h1:YngfUVmvsvOJ2rRgStIyHsKtOt9SZI2aBJrZiWJhCbI= +go.etcd.io/etcd/client/v3 v3.6.6 h1:G5z1wMf5B9SNexoxOHUGBaULurOZPIgGPsW6CN492ec= +go.etcd.io/etcd/client/v3 v3.6.6/go.mod h1:36Qv6baQ07znPR3+n7t+Rk5VHEzVYPvFfGmfF4wBHV8= go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss= go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -1251,49 +1223,49 @@ go.opentelemetry.io/collector/processor/xprocessor v0.139.0 h1:O9x9RF/OG8gZ+HrOc go.opentelemetry.io/collector/processor/xprocessor v0.139.0/go.mod h1:hqGhEZ1/PftD/QHaYna0o1xAqZUsb7GhqpOiaTTDJnQ= go.opentelemetry.io/collector/semconv v0.128.0 h1:MzYOz7Vgb3Kf5D7b49pqqgeUhEmOCuT10bIXb/Cc+k4= go.opentelemetry.io/collector/semconv v0.128.0/go.mod h1:OPXer4l43X23cnjLXIZnRj/qQOjSuq4TgBLI76P9hns= -go.opentelemetry.io/contrib/bridges/prometheus v0.61.0 h1:RyrtJzu5MAmIcbRrwg75b+w3RlZCP0vJByDVzcpAe3M= -go.opentelemetry.io/contrib/bridges/prometheus v0.61.0/go.mod h1:tirr4p9NXbzjlbruiRGp53IzlYrDk5CO2fdHj0sSSaY= +go.opentelemetry.io/contrib/bridges/prometheus v0.63.0 h1:/Rij/t18Y7rUayNg7Id6rPrEnHgorxYabm2E6wUdPP4= +go.opentelemetry.io/contrib/bridges/prometheus v0.63.0/go.mod h1:AdyDPn6pkbkt2w01n3BubRVk7xAsCRq1Yg1mpfyA/0E= go.opentelemetry.io/contrib/detectors/gcp v1.38.0 h1:ZoYbqX7OaA/TAikspPl3ozPI6iY6LiIY9I8cUfm+pJs= go.opentelemetry.io/contrib/detectors/gcp v1.38.0/go.mod h1:SU+iU7nu5ud4oCb3LQOhIZ3nRLj6FNVrKgtflbaf2ts= -go.opentelemetry.io/contrib/exporters/autoexport v0.61.0 h1:XfzKtKSrbtYk9TNCF8dkO0Y9M7IOfb4idCwBOTwGBiI= -go.opentelemetry.io/contrib/exporters/autoexport v0.61.0/go.mod h1:N6otC+qXTD5bAnbK2O1f/1SXq3cX+3KYSWrkBUqG0cw= +go.opentelemetry.io/contrib/exporters/autoexport 
v0.63.0 h1:NLnZybb9KkfMXPwZhd5diBYJoVxiO9Qa06dacEA7ySY= +go.opentelemetry.io/contrib/exporters/autoexport v0.63.0/go.mod h1:OvRg7gm5WRSCtxzGSsrFHbDLToYlStHNZQ+iPNIyD6g= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.63.0 h1:2pn7OzMewmYRiNtv1doZnLo3gONcnMHlFnmOR8Vgt+8= go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.63.0/go.mod h1:rjbQTDEPQymPE0YnRQp9/NuPwwtL0sesz/fnqRW/v84= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= -go.opentelemetry.io/contrib/propagators/jaeger v1.35.0 h1:UIrZgRBHUrYRlJ4V419lVb4rs2ar0wFzKNAebaP05XU= -go.opentelemetry.io/contrib/propagators/jaeger v1.35.0/go.mod h1:0ciyFyYZxE6JqRAQvIgGRabKWDUmNdW3GAQb6y/RlFU= -go.opentelemetry.io/contrib/samplers/jaegerremote v0.30.0 h1:bQ1Gvah4Sp8z7epSkgJaNTuZm7sutfA6Fji2/7cKFMc= -go.opentelemetry.io/contrib/samplers/jaegerremote v0.30.0/go.mod h1:9b8Q9rH52NgYH3ShiTFB5wf18Vt3RTH/VMB7LDcC1ug= +go.opentelemetry.io/contrib/propagators/jaeger v1.38.0 h1:nXGeLvT1QtCAhkASkP/ksjkTKZALIaQBIW+JSIw1KIc= +go.opentelemetry.io/contrib/propagators/jaeger v1.38.0/go.mod h1:oMvOXk78ZR3KEuPMBgp/ThAMDy9ku/eyUVztr+3G6Wo= +go.opentelemetry.io/contrib/samplers/jaegerremote v0.32.0 h1:oPW/SRFyHgIgxrvNhSBzqvZER2N5kRlci3/rGTOuyWo= +go.opentelemetry.io/contrib/samplers/jaegerremote v0.32.0/go.mod h1:B9Oka5QVD0bnmZNO6gBbBta6nohD/1Z+f9waH2oXyBs= go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2 h1:06ZeJRe5BnYXceSM9Vya83XXVaNGe3H1QqsvqRANQq8= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2/go.mod h1:DvPtKE63knkDVP88qpatBj81JxN+w1bqfVbsbCbj1WY= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2 h1:tPLwQlXbJ8NSOfZc4OkgU5h2A38M4c9kfHSVc4PFQGs= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2/go.mod h1:QTnxBwT/1rBIgAG1goq6xMydfYOBKU6KTiYF4fp5zL8= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0 h1:zwdo1gS2eH26Rg+CoqVQpEK1h8gvt5qyU5Kk5Bixvow= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0/go.mod h1:rUKCPscaRWWcqGT6HnEmYrK+YNe5+Sw64xgQTOJ5b30= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0 h1:gAU726w9J8fwr4qRDqu1GYMNNs4gXrU+Pv20/N1UpB4= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0/go.mod h1:RboSDkp7N292rgu+T0MgVt2qgFGu6qa1RpZDOtpL76w= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0 h1:OMqPldHt79PqWKOMYIAQs3CxAi7RLgPxwfFSwr4ZxtM= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0/go.mod h1:1biG4qiqTxKiUCtoWDPpL3fB3KxVwCiGw81j3nKMuHE= 
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.14.0 h1:QQqYw3lkrzwVsoEX0w//EhH/TCnpRdEenKBOOEIMjWc= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.14.0/go.mod h1:gSVQcr17jk2ig4jqJ2DX30IdWH251JcNAecvrqTxH1s= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 h1:vl9obrcoWVKp/lwl8tRE33853I8Xru9HFbw/skNeLs8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0/go.mod h1:GAXRxmLJcVM3u22IjTg74zWBrRCKq8BnOqUVLodpcpw= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0 h1:Oe2z/BCg5q7k4iXC3cqJxKYg0ieRiOqF0cecFYdPTwk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0/go.mod h1:ZQM5lAJpOsKnYagGg/zV2krVqTtaVdYdDkhMoX6Oalg= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4= -go.opentelemetry.io/otel/exporters/prometheus v0.58.0 h1:CJAxWKFIqdBennqxJyOgnt5LqkeFRT+Mz3Yjz3hL+h8= -go.opentelemetry.io/otel/exporters/prometheus v0.58.0/go.mod h1:7qo/4CLI+zYSNbv0GMNquzuss2FVZo3OYrGh96n4HNc= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2 h1:12vMqzLLNZtXuXbJhSENRg+Vvx+ynNilV8twBLBsXMY= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2/go.mod h1:ZccPZoPOoq8x3Trik/fCsba7DEYDUnN6yX79pgp2BUQ= +go.opentelemetry.io/otel/exporters/prometheus v0.60.0 h1:cGtQxGvZbnrWdC2GyjZi0PDKVSLWP/Jocix3QWfXtbo= +go.opentelemetry.io/otel/exporters/prometheus v0.60.0/go.mod h1:hkd1EekxNo69PTV4OWFGZcKQiIqg0RfuWExcPKFvepk= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0 h1:B/g+qde6Mkzxbry5ZZag0l7QrQBCtVm7lVjaLgmpje8= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0/go.mod h1:mOJK8eMmgW6ocDJn6Bn11CcZ05gi3P8GylBXEkZtbgA= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0 h1:wm/Q0GAAykXv83wzcKzGGqAnnfLFyFe7RslekZuv+VI= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0/go.mod h1:ra3Pa40+oKjvYh+ZD3EdxFZZB0xdMfuileHAm4nNN7w= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0 h1:G8Xec/SgZQricwWBJF/mHZc7A02YHedfFDENwJEdRA0= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0/go.mod h1:PD57idA/AiFD5aqoxGxCvT/ILJPeHy3MjqU/NS7KogY= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 h1:kJxSDN4SgWWTjG/hPp3O7LCGLcHXFlvS2/FFOrwL+SE= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0/go.mod h1:mgIOzS7iZeKJdeB8/NYHrJ48fdGc71Llo5bJ1J4DWUE= go.opentelemetry.io/otel/log v0.14.0 h1:2rzJ+pOAZ8qmZ3DDHg73NEKzSZkhkGIua9gXtxNGgrM= go.opentelemetry.io/otel/log v0.14.0/go.mod h1:5jRG92fEAgx0SU/vFPxmJvhIuDU9E1SUnEQrMlJpOno= go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= @@ -1302,10 +1274,10 @@ go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2 go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= go.opentelemetry.io/otel/sdk v1.38.0 
h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= -go.opentelemetry.io/otel/sdk/log v0.12.2 h1:yNoETvTByVKi7wHvYS6HMcZrN5hFLD7I++1xIZ/k6W0= -go.opentelemetry.io/otel/sdk/log v0.12.2/go.mod h1:DcpdmUXHJgSqN/dh+XMWa7Vf89u9ap0/AAk/XGLnEzY= -go.opentelemetry.io/otel/sdk/log/logtest v0.0.0-20250521073539-a85ae98dcedc h1:uqxdywfHqqCl6LmZzI3pUnXT1RGFYyUgxj0AkWPFxi0= -go.opentelemetry.io/otel/sdk/log/logtest v0.0.0-20250521073539-a85ae98dcedc/go.mod h1:TY/N/FT7dmFrP/r5ym3g0yysP1DefqGpAZr4f82P0dE= +go.opentelemetry.io/otel/sdk/log v0.14.0 h1:JU/U3O7N6fsAXj0+CXz21Czg532dW2V4gG1HE/e8Zrg= +go.opentelemetry.io/otel/sdk/log v0.14.0/go.mod h1:imQvII+0ZylXfKU7/wtOND8Hn4OpT3YUoIgqJVksUkM= +go.opentelemetry.io/otel/sdk/log/logtest v0.14.0 h1:Ijbtz+JKXl8T2MngiwqBlPaHqc4YCaP/i13Qrow6gAM= +go.opentelemetry.io/otel/sdk/log/logtest v0.14.0/go.mod h1:dCU8aEL6q+L9cYTqcVOk8rM9Tp8WdnHOPLiBgp0SGOA= go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= @@ -1319,15 +1291,12 @@ go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.2.0 h1:o go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.2.0/go.mod h1:Gyb6Xe7FTi/6xBHwMmngGoHqL0w29Y4eW8TGFzpefGA= go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.2.0 h1:EiUYvtwu6PMrMHVjcPfnsG3v+ajPkbUeH+IL93+QYyk= go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.2.0/go.mod h1:mUUHKFiN2SST3AhJ8XhJxEoeVW12oqfXog0Bo8W3Ec4= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= @@ -1340,7 +1309,6 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ 
-1351,9 +1319,7 @@ golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= @@ -1363,9 +1329,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY= -golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= -golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39 h1:DHNhtq3sNNzrvduZZIiFyXWOL9IWaDPHqTnLJp+rCBY= +golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39/go.mod h1:46edojNIoXTNOhySWIWdix628clX9ODXwPsQuG6hsK0= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1378,7 +1343,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -1387,11 +1351,10 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= -golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1406,7 +1369,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1425,7 +1387,6 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= @@ -1452,7 +1413,6 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= @@ -1471,8 +1431,6 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1499,10 +1457,7 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1524,8 +1479,8 @@ golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 h1:LvzTn0GQhWuvKH/kVRS3R3bVAsdQWI7hvfLHGgh9+lU= -golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8/go.mod h1:Pi4ztBfryZoJEkyFTI5/Ocsu2jXyDr6iSdgJiYE/uwE= +golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 h1:E2/AqCUMZGgd73TQkxUMcMla25GB9i/5HOdLr+uH7Vo= +golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -1538,7 +1493,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= @@ -1555,7 +1509,6 @@ golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -1567,7 +1520,6 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1598,11 +1550,10 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= -golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= golang.org/x/tools/godoc v0.1.0-deprecated h1:o+aZ1BOj6Hsx/GBdJO/s815sqftjSnrZZwyYTHODvtk= golang.org/x/tools/godoc v0.1.0-deprecated/go.mod h1:qM63CriJ961IHWmnWa9CjZnBndniPt4a3CK0PVB9bIg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1611,14 +1562,10 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da 
h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1667,14 +1614,12 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9 h1:LvZVVaPE0JSqL+ZWb6ErZfnEOKIqqFWUJE2D0fObSmc= google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9/go.mod h1:QFOrLhdAe2PsTp3vQY4quuLKTi9j3XG3r6JPPaw7MSc= google.golang.org/genproto/googleapis/api v0.0.0-20251111163417-95abcf5c77ba h1:B14OtaXuMaCQsl2deSvNkyPKIzq3BjfxQp8d00QyWx4= @@ -1694,9 +1639,7 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.38.0/go.mod 
h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -1710,7 +1653,6 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= @@ -1718,7 +1660,6 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= @@ -1735,7 +1676,6 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1743,7 +1683,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= @@ -1797,7 +1736,6 @@ modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/binaryregexp 
v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= @@ -1806,7 +1744,6 @@ sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= zombiezen.com/go/sqlite v1.4.2 h1:KZXLrBuJ7tKNEm+VJcApLMeQbhmAUOKA5VWS93DfFRo= diff --git a/pkg/distributor/distributor_ring.go b/pkg/distributor/distributor_ring.go index 0beffd91791a3..fe39a0e10d1fb 100644 --- a/pkg/distributor/distributor_ring.go +++ b/pkg/distributor/distributor_ring.go @@ -47,7 +47,7 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { // Ring flags cfg.KVStore.RegisterFlagsWithPrefix("distributor.ring.", "collectors/", f) - f.DurationVar(&cfg.HeartbeatPeriod, "distributor.ring.heartbeat-period", 5*time.Second, "Period at which to heartbeat to the ring. 0 = disabled.") + f.DurationVar(&cfg.HeartbeatPeriod, "distributor.ring.heartbeat-period", 5*time.Second, "Period at which to heartbeat to the ring.") f.DurationVar(&cfg.HeartbeatTimeout, "distributor.ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which distributors are considered unhealthy within the ring. 0 = never (timeout disabled).") // Instance flags diff --git a/pkg/distributor/instance_count_test.go b/pkg/distributor/instance_count_test.go index 4eca382e8d0bb..b282bc6058b49 100644 --- a/pkg/distributor/instance_count_test.go +++ b/pkg/distributor/instance_count_test.go @@ -111,7 +111,7 @@ func TestInstanceCountDelegate_CorrectlyInvokesOtherDelegates(t *testing.T) { require.NoError(t, err) ingesters := ring.NewDesc() - ingesters.AddIngester("ingester-0", "ingester-0:3100", "zone-a", []uint32{1}, ring.ACTIVE, time.Now(), false, time.Now()) + ingesters.AddIngester("ingester-0", "ingester-0:3100", "zone-a", []uint32{1}, ring.ACTIVE, time.Now(), false, time.Now(), nil) // initial state. require.Equal(t, 0, sentry1["Heartbeat"]) diff --git a/pkg/ingester/downscale.go b/pkg/ingester/downscale.go index 55b3ee2d0ae9b..a14ff70bc61bb 100644 --- a/pkg/ingester/downscale.go +++ b/pkg/ingester/downscale.go @@ -1,6 +1,7 @@ package ingester import ( + "errors" "net/http" "github.com/go-kit/log" @@ -82,6 +83,12 @@ func (i *Ingester) PreparePartitionDownscaleHandler(w http.ResponseWriter, r *ht // "lookback period" ago, but since we delete inactive partitions with no owners that moved to inactive since longer // than "lookback period" ago, it looks to be an edge case not worth to address. 
if err := i.partitionRingLifecycler.ChangePartitionState(r.Context(), ring.PartitionActive); err != nil { + if errors.Is(err, ring.ErrPartitionStateChangeLocked) { + level.Warn(logger).Log("msg", "failed to change partition state to active", "err", err) + w.WriteHeader(http.StatusConflict) + return + } + level.Error(logger).Log("msg", "failed to change partition state to active", "err", err) http.Error(w, err.Error(), http.StatusInternalServerError) return diff --git a/pkg/limits/frontend/frontend_test.go b/pkg/limits/frontend/frontend_test.go index 8ed3ff61b92d7..2d46674e0815d 100644 --- a/pkg/limits/frontend/frontend_test.go +++ b/pkg/limits/frontend/frontend_test.go @@ -164,6 +164,8 @@ func TestFrontend_ExceedsLimits(t *testing.T) { Store: "inmemory", }, }, + HeartbeatPeriod: time.Second, + HeartbeatTimeout: time.Minute, }, }, "test", readRing, log.NewNopLogger(), prometheus.NewRegistry()) require.NoError(t, err) diff --git a/pkg/limits/frontend/http_test.go b/pkg/limits/frontend/http_test.go index dd12dec809278..666517ed02329 100644 --- a/pkg/limits/frontend/http_test.go +++ b/pkg/limits/frontend/http_test.go @@ -7,6 +7,7 @@ import ( "net/http" "net/http/httptest" "testing" + "time" "github.com/go-kit/log" "github.com/grafana/dskit/kv" @@ -83,6 +84,8 @@ func TestFrontend_ServeHTTP(t *testing.T) { Store: "inmemory", }, }, + HeartbeatPeriod: time.Millisecond, + HeartbeatTimeout: time.Millisecond, }, }, "test", readRing, log.NewNopLogger(), prometheus.NewRegistry()) require.NoError(t, err) diff --git a/pkg/ruler/base/lifecycle_test.go b/pkg/ruler/base/lifecycle_test.go index 417810d1dcf2a..669ec5f459e4e 100644 --- a/pkg/ruler/base/lifecycle_test.go +++ b/pkg/ruler/base/lifecycle_test.go @@ -79,7 +79,7 @@ func TestRuler_RingLifecyclerShouldAutoForgetUnhealthyInstances(t *testing.T) { require.NoError(t, ringStore.CAS(ctx, ringKey, func(in interface{}) (interface{}, bool, error) { ringDesc := ring.GetOrCreateRingDesc(in) - instance := ringDesc.AddIngester(unhealthyInstanceID, "1.1.1.1", "", generateSortedTokens(config.Ring.NumTokens), ring.ACTIVE, time.Now(), false, time.Now()) + instance := ringDesc.AddIngester(unhealthyInstanceID, "1.1.1.1", "", generateSortedTokens(config.Ring.NumTokens), ring.ACTIVE, time.Now(), false, time.Now(), nil) instance.Timestamp = time.Now().Add(-(ringAutoForgetUnhealthyPeriods + 1) * heartbeatTimeout).Unix() ringDesc.Ingesters[unhealthyInstanceID] = instance diff --git a/pkg/ruler/base/ruler_ring.go b/pkg/ruler/base/ruler_ring.go index 4d50dd2934c51..5609d6f959d5d 100644 --- a/pkg/ruler/base/ruler_ring.go +++ b/pkg/ruler/base/ruler_ring.go @@ -63,7 +63,7 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { // Ring flags cfg.KVStore.RegisterFlagsWithPrefix("ruler.ring.", "rulers/", f) - f.DurationVar(&cfg.HeartbeatPeriod, "ruler.ring.heartbeat-period", 5*time.Second, "Interval between heartbeats sent to the ring. 0 = disabled.") + f.DurationVar(&cfg.HeartbeatPeriod, "ruler.ring.heartbeat-period", 5*time.Second, "Interval between heartbeats sent to the ring.") f.DurationVar(&cfg.HeartbeatTimeout, "ruler.ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which ruler ring members are considered unhealthy within the ring. 
0 = never (timeout disabled).") // Instance flags diff --git a/pkg/ruler/base/ruler_test.go b/pkg/ruler/base/ruler_test.go index d50d00b2ff307..ef42a5b7df97e 100644 --- a/pkg/ruler/base/ruler_test.go +++ b/pkg/ruler/base/ruler_test.go @@ -545,6 +545,8 @@ func TestGetRules(t *testing.T) { KVStore: kv.Config{ Mock: kvStore, }, + HeartbeatPeriod: time.Second, + HeartbeatTimeout: time.Minute, } m := loki_storage.NewClientMetrics() defer m.Unregister() @@ -590,7 +592,7 @@ func TestGetRules(t *testing.T) { d = ring.NewDesc() } for rID, tokens := range allTokensByRuler { - d.AddIngester(rID, rulerAddrMap[rID].lifecycler.GetInstanceAddr(), "", tokens, ring.ACTIVE, time.Now(), false, time.Now()) + d.AddIngester(rID, rulerAddrMap[rID].lifecycler.GetInstanceAddr(), "", tokens, ring.ACTIVE, time.Now(), false, time.Now(), nil) } return d, true, nil }) @@ -772,7 +774,7 @@ func TestSharding(t *testing.T) { sharding: true, shardingStrategy: util.ShardingStrategyDefault, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", []uint32{0}, ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", []uint32{0}, ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ruler1: allRules}, }, @@ -782,7 +784,7 @@ func TestSharding(t *testing.T) { shardingStrategy: util.ShardingStrategyDefault, enabledUsers: []string{user1}, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", []uint32{0}, ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", []uint32{0}, ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ruler1: map[string]rulespb.RuleGroupList{ user1: {user1Group1, user1Group2}, @@ -794,7 +796,7 @@ func TestSharding(t *testing.T) { shardingStrategy: util.ShardingStrategyDefault, disabledUsers: []string{user1}, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", []uint32{0}, ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", []uint32{0}, ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ruler1: map[string]rulespb.RuleGroupList{ user2: {user2Group1}, @@ -806,8 +808,8 @@ func TestSharding(t *testing.T) { sharding: true, shardingStrategy: util.ShardingStrategyDefault, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Token + 1, user2Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) - desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group2Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Token + 1, user2Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group2Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ @@ -828,8 +830,8 @@ func TestSharding(t *testing.T) { shardingStrategy: util.ShardingStrategyDefault, enabledUsers: []string{user1}, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Token + 1, user2Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) - desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group2Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", 
sortTokens([]uint32{user1Group1Token + 1, user2Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group2Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ @@ -848,8 +850,8 @@ func TestSharding(t *testing.T) { shardingStrategy: util.ShardingStrategyDefault, disabledUsers: []string{user1}, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Token + 1, user2Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) - desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group2Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Token + 1, user2Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group2Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ @@ -868,7 +870,7 @@ func TestSharding(t *testing.T) { shardingStrategy: util.ShardingStrategyDefault, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Token + 1, user2Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Token + 1, user2Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) desc.Ingesters[ruler2] = ring.InstanceDesc{ Addr: ruler2Addr, Timestamp: time.Now().Add(-time.Hour).Unix(), @@ -892,8 +894,8 @@ func TestSharding(t *testing.T) { shardingStrategy: util.ShardingStrategyDefault, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Token + 1, user2Group1Token + 1}), ring.LEAVING, time.Now(), false, time.Now()) - desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group2Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Token + 1, user2Group1Token + 1}), ring.LEAVING, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group2Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ @@ -908,8 +910,8 @@ func TestSharding(t *testing.T) { shardingStrategy: util.ShardingStrategyDefault, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Token + 1, user2Group1Token + 1}), ring.JOINING, time.Now(), false, time.Now()) - desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group2Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Token + 1, user2Group1Token + 1}), ring.JOINING, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group2Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ @@ -924,7 +926,7 @@ func TestSharding(t *testing.T) { shardingStrategy: util.ShardingStrategyShuffle, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{0}), ring.ACTIVE, time.Now(), false, time.Now()) + 
desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{0}), ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ @@ -938,8 +940,8 @@ func TestSharding(t *testing.T) { shuffleShardSize: 1, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, userToken(user2, 0) + 1, userToken(user3, 0) + 1}), ring.ACTIVE, time.Now(), false, time.Now()) - desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group1Token + 1, user1Group2Token + 1, user2Group1Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, userToken(user2, 0) + 1, userToken(user3, 0) + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group1Token + 1, user1Group2Token + 1, user2Group1Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ @@ -956,8 +958,8 @@ func TestSharding(t *testing.T) { setupRing: func(desc *ring.Desc) { // Exact same tokens setup as previous test. - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, userToken(user2, 0) + 1, userToken(user3, 0) + 1}), ring.ACTIVE, time.Now(), false, time.Now()) - desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group1Token + 1, user1Group2Token + 1, user2Group1Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, userToken(user2, 0) + 1, userToken(user3, 0) + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group1Token + 1, user1Group2Token + 1, user2Group1Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ @@ -972,8 +974,8 @@ func TestSharding(t *testing.T) { shuffleShardSize: 1, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1}), ring.ACTIVE, time.Now(), false, time.Now()) - desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{userToken(user2, 0) + 1, userToken(user3, 0) + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{userToken(user2, 0) + 1, userToken(user3, 0) + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ @@ -992,9 +994,9 @@ func TestSharding(t *testing.T) { shuffleShardSize: 2, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, user1Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) - desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{userToken(user1, 1) + 1, user1Group2Token + 1, userToken(user2, 1) + 1, userToken(user3, 1) + 1}), ring.ACTIVE, time.Now(), false, time.Now()) - desc.AddIngester(ruler3, ruler3Addr, "", sortTokens([]uint32{userToken(user2, 0) + 1, userToken(user3, 0) + 1, user2Group1Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, user1Group1Token + 1}), ring.ACTIVE, 
time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{userToken(user1, 1) + 1, user1Group2Token + 1, userToken(user2, 1) + 1, userToken(user3, 1) + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler3, ruler3Addr, "", sortTokens([]uint32{userToken(user2, 0) + 1, userToken(user3, 0) + 1, user2Group1Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ @@ -1016,9 +1018,9 @@ func TestSharding(t *testing.T) { shuffleShardSize: 2, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, userToken(user2, 1) + 1, user1Group1Token + 1, user1Group2Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) - desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{userToken(user1, 1) + 1, userToken(user3, 1) + 1, user2Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) - desc.AddIngester(ruler3, ruler3Addr, "", sortTokens([]uint32{userToken(user2, 0) + 1, userToken(user3, 0) + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, userToken(user2, 1) + 1, user1Group1Token + 1, user1Group2Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{userToken(user1, 1) + 1, userToken(user3, 1) + 1, user2Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler3, ruler3Addr, "", sortTokens([]uint32{userToken(user2, 0) + 1, userToken(user3, 0) + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ @@ -1040,9 +1042,9 @@ func TestSharding(t *testing.T) { enabledUsers: []string{user1}, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, user1Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) - desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{userToken(user1, 1) + 1, user1Group2Token + 1, userToken(user2, 1) + 1, userToken(user3, 1) + 1}), ring.ACTIVE, time.Now(), false, time.Now()) - desc.AddIngester(ruler3, ruler3Addr, "", sortTokens([]uint32{userToken(user2, 0) + 1, userToken(user3, 0) + 1, user2Group1Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, user1Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{userToken(user1, 1) + 1, user1Group2Token + 1, userToken(user2, 1) + 1, userToken(user3, 1) + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler3, ruler3Addr, "", sortTokens([]uint32{userToken(user2, 0) + 1, userToken(user3, 0) + 1, user2Group1Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ @@ -1063,9 +1065,9 @@ func TestSharding(t *testing.T) { disabledUsers: []string{user1}, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, user1Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) - desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{userToken(user1, 1) + 1, user1Group2Token + 1, userToken(user2, 1) + 1, userToken(user3, 1) + 1}), ring.ACTIVE, time.Now(), 
false, time.Now()) - desc.AddIngester(ruler3, ruler3Addr, "", sortTokens([]uint32{userToken(user2, 0) + 1, userToken(user3, 0) + 1, user2Group1Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, user1Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{userToken(user1, 1) + 1, user1Group2Token + 1, userToken(user2, 1) + 1, userToken(user3, 1) + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler3, ruler3Addr, "", sortTokens([]uint32{userToken(user2, 0) + 1, userToken(user3, 0) + 1, user2Group1Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ @@ -1082,7 +1084,7 @@ func TestSharding(t *testing.T) { sharding: true, shardingAlgo: util.ShardingAlgoByRule, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", []uint32{0}, ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", []uint32{0}, ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ruler1: allRulesSharded}, }, @@ -1092,7 +1094,7 @@ func TestSharding(t *testing.T) { shardingAlgo: util.ShardingAlgoByRule, enabledUsers: []string{user1}, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", []uint32{0}, ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", []uint32{0}, ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ruler1: map[string]rulespb.RuleGroupList{ user1: { @@ -1108,7 +1110,7 @@ func TestSharding(t *testing.T) { shardingAlgo: util.ShardingAlgoByRule, disabledUsers: []string{user1}, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", []uint32{0}, ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", []uint32{0}, ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ruler1: map[string]rulespb.RuleGroupList{ user2: { @@ -1126,8 +1128,8 @@ func TestSharding(t *testing.T) { sharding: true, shardingAlgo: util.ShardingAlgoByRule, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Rule1Token + 1, user2Group1Rule2Token + 1, user1Group1Rule2Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) - desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group2Rule1Token + 1, user3Group1Rule1Token + 1, user3Group1Rule2Token + 1, user2Group1Rule1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Rule1Token + 1, user2Group1Rule2Token + 1, user1Group1Rule2Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group2Rule1Token + 1, user3Group1Rule1Token + 1, user3Group1Rule2Token + 1, user2Group1Rule1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ @@ -1155,8 +1157,8 @@ func TestSharding(t *testing.T) { shardingAlgo: util.ShardingAlgoByRule, enabledUsers: []string{user1}, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Rule1Token + 1, user2Group1Rule2Token + 1, user1Group1Rule2Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) - desc.AddIngester(ruler2, ruler2Addr, 
"", sortTokens([]uint32{user1Group2Rule1Token + 1, user3Group1Rule1Token + 1, user3Group1Rule2Token + 1, user2Group1Rule1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Rule1Token + 1, user2Group1Rule2Token + 1, user1Group1Rule2Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group2Rule1Token + 1, user3Group1Rule1Token + 1, user3Group1Rule2Token + 1, user2Group1Rule1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ @@ -1180,8 +1182,8 @@ func TestSharding(t *testing.T) { shardingAlgo: util.ShardingAlgoByRule, disabledUsers: []string{user1}, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Rule1Token + 1, user2Group1Rule2Token + 1, user1Group1Rule2Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) - desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group2Rule1Token + 1, user3Group1Rule1Token + 1, user3Group1Rule2Token + 1, user2Group1Rule1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Rule1Token + 1, user2Group1Rule2Token + 1, user1Group1Rule2Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group2Rule1Token + 1, user3Group1Rule1Token + 1, user3Group1Rule2Token + 1, user2Group1Rule1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ @@ -1208,7 +1210,7 @@ func TestSharding(t *testing.T) { shardingAlgo: util.ShardingAlgoByRule, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Rule1Token + 1, user2Group1Rule2Token + 1, user1Group1Rule2Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Rule1Token + 1, user2Group1Rule2Token + 1, user1Group1Rule2Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) desc.Ingesters[ruler2] = ring.InstanceDesc{ Addr: ruler2Addr, Timestamp: time.Now().Add(-time.Hour).Unix(), @@ -1237,8 +1239,8 @@ func TestSharding(t *testing.T) { shardingAlgo: util.ShardingAlgoByRule, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Rule1Token + 1, user2Group1Rule2Token + 1, user1Group1Rule2Token + 1}), ring.LEAVING, time.Now(), false, time.Now()) - desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group2Rule1Token + 1, user3Group1Rule1Token + 1, user3Group1Rule2Token + 1, user2Group1Rule1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Rule1Token + 1, user2Group1Rule2Token + 1, user1Group1Rule2Token + 1}), ring.LEAVING, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group2Rule1Token + 1, user3Group1Rule1Token + 1, user3Group1Rule2Token + 1, user2Group1Rule1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ @@ -1253,8 +1255,8 @@ func TestSharding(t *testing.T) { shardingAlgo: util.ShardingAlgoByRule, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Rule1Token + 1, user2Group1Rule2Token + 1, user1Group1Rule2Token + 1}), 
ring.JOINING, time.Now(), false, time.Now()) - desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group2Rule1Token + 1, user3Group1Rule1Token + 1, user3Group1Rule2Token + 1, user2Group1Rule1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Rule1Token + 1, user2Group1Rule2Token + 1, user1Group1Rule2Token + 1}), ring.JOINING, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group2Rule1Token + 1, user3Group1Rule1Token + 1, user3Group1Rule2Token + 1, user2Group1Rule1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ @@ -1270,7 +1272,7 @@ func TestSharding(t *testing.T) { shardingAlgo: util.ShardingAlgoByRule, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{0}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{0}), ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ @@ -1285,9 +1287,9 @@ func TestSharding(t *testing.T) { shuffleShardSize: 1, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, userToken(user2, 0) + 1, userToken(user3, 0) + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, userToken(user2, 0) + 1, userToken(user3, 0) + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) // immaterial what tokens this ruler has, it won't be assigned any rules - desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group2Rule1Token + 1, user3Group1Rule1Token + 1, user3Group1Rule2Token + 1, user2Group1Rule1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group2Rule1Token + 1, user3Group1Rule1Token + 1, user3Group1Rule2Token + 1, user2Group1Rule1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ @@ -1305,9 +1307,9 @@ func TestSharding(t *testing.T) { setupRing: func(desc *ring.Desc) { // Exact same tokens setup as previous test. 
- desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, userToken(user2, 0) + 1, userToken(user3, 0) + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, userToken(user2, 0) + 1, userToken(user3, 0) + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) // this ruler has all the rule tokens, so it gets all the rules - desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group1Rule1Token + 1, user1Group1Rule2Token + 1, user1Group2Rule1Token + 1, user2Group1Rule1Token + 1, user2Group1Rule2Token + 1, user3Group1Rule1Token + 1, user3Group1Rule2Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group1Rule1Token + 1, user1Group1Rule2Token + 1, user1Group2Rule1Token + 1, user2Group1Rule1Token + 1, user2Group1Rule2Token + 1, user3Group1Rule1Token + 1, user3Group1Rule2Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ @@ -1323,8 +1325,8 @@ func TestSharding(t *testing.T) { shuffleShardSize: 1, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1}), ring.ACTIVE, time.Now(), false, time.Now()) - desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{userToken(user2, 0) + 1, userToken(user3, 0) + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{userToken(user2, 0) + 1, userToken(user3, 0) + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ @@ -1355,9 +1357,9 @@ func TestSharding(t *testing.T) { shuffleShardSize: 2, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, user1Group1Rule1Token + 1, user1Group2Rule1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) - desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{userToken(user1, 1) + 1, user1Group1Rule2Token + 1, userToken(user2, 1) + 1, userToken(user3, 1) + 1}), ring.ACTIVE, time.Now(), false, time.Now()) - desc.AddIngester(ruler3, ruler3Addr, "", sortTokens([]uint32{userToken(user2, 0) + 1, userToken(user3, 0) + 1, user2Group1Rule1Token + 1, user2Group1Rule2Token + 1, user3Group1Rule1Token + 1, user3Group1Rule2Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, user1Group1Rule1Token + 1, user1Group2Rule1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{userToken(user1, 1) + 1, user1Group1Rule2Token + 1, userToken(user2, 1) + 1, userToken(user3, 1) + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler3, ruler3Addr, "", sortTokens([]uint32{userToken(user2, 0) + 1, userToken(user3, 0) + 1, user2Group1Rule1Token + 1, user2Group1Rule2Token + 1, user3Group1Rule1Token + 1, user3Group1Rule2Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ @@ -1391,9 +1393,9 @@ func TestSharding(t *testing.T) { shuffleShardSize: 2, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, userToken(user2, 1) + 1, 
user1Group1Rule1Token + 1, user1Group1Rule2Token + 1, user1Group2Rule1Token + 1, user2Group1Rule1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) - desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{userToken(user1, 1) + 1, userToken(user3, 1) + 1}), ring.ACTIVE, time.Now(), false, time.Now()) - desc.AddIngester(ruler3, ruler3Addr, "", sortTokens([]uint32{userToken(user2, 0) + 1, userToken(user3, 0) + 1, user3Group1Rule1Token + 1, user3Group1Rule2Token + 1, user2Group1Rule2Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, userToken(user2, 1) + 1, user1Group1Rule1Token + 1, user1Group1Rule2Token + 1, user1Group2Rule1Token + 1, user2Group1Rule1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{userToken(user1, 1) + 1, userToken(user3, 1) + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler3, ruler3Addr, "", sortTokens([]uint32{userToken(user2, 0) + 1, userToken(user3, 0) + 1, user3Group1Rule1Token + 1, user3Group1Rule2Token + 1, user2Group1Rule2Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ @@ -1428,9 +1430,9 @@ func TestSharding(t *testing.T) { enabledUsers: []string{user1}, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, userToken(user2, 1) + 1, user1Group1Rule1Token + 1, user1Group1Rule2Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) - desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{userToken(user1, 1) + 1, userToken(user3, 1) + 1, user1Group2Rule1Token + 1, user2Group1Rule1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) - desc.AddIngester(ruler3, ruler3Addr, "", sortTokens([]uint32{userToken(user2, 0) + 1, userToken(user3, 0) + 1, user3Group1Rule1Token + 1, user3Group1Rule2Token + 1, user2Group1Rule2Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, userToken(user2, 1) + 1, user1Group1Rule1Token + 1, user1Group1Rule2Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{userToken(user1, 1) + 1, userToken(user3, 1) + 1, user1Group2Rule1Token + 1, user2Group1Rule1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler3, ruler3Addr, "", sortTokens([]uint32{userToken(user2, 0) + 1, userToken(user3, 0) + 1, user3Group1Rule1Token + 1, user3Group1Rule2Token + 1, user2Group1Rule2Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ @@ -1457,9 +1459,9 @@ func TestSharding(t *testing.T) { disabledUsers: []string{user1}, setupRing: func(desc *ring.Desc) { - desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, userToken(user2, 1) + 1, user1Group1Rule1Token + 1, user1Group1Rule2Token + 1, user2Group1Rule1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) - desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{userToken(user1, 1) + 1, userToken(user3, 1) + 1, user1Group2Rule1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now()) - desc.AddIngester(ruler3, ruler3Addr, "", sortTokens([]uint32{userToken(user2, 0) + 1, userToken(user3, 0) + 1, user3Group1Rule1Token + 1, user3Group1Rule2Token + 1, user2Group1Rule2Token + 1}), ring.ACTIVE, time.Now(), false, 
time.Now()) + desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, userToken(user2, 1) + 1, user1Group1Rule1Token + 1, user1Group1Rule2Token + 1, user2Group1Rule1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{userToken(user1, 1) + 1, userToken(user3, 1) + 1, user1Group2Rule1Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) + desc.AddIngester(ruler3, ruler3Addr, "", sortTokens([]uint32{userToken(user2, 0) + 1, userToken(user3, 0) + 1, user3Group1Rule1Token + 1, user3Group1Rule2Token + 1, user2Group1Rule2Token + 1}), ring.ACTIVE, time.Now(), false, time.Now(), nil) }, expectedRules: expectedRulesMap{ diff --git a/pkg/util/ring/ring_config.go b/pkg/util/ring/ring_config.go index d64bea1759cc9..fdab53e84d5c9 100644 --- a/pkg/util/ring/ring_config.go +++ b/pkg/util/ring/ring_config.go @@ -59,7 +59,7 @@ func (cfg *RingConfig) RegisterFlagsWithPrefix(flagsPrefix, storePrefix string, // Ring flags cfg.KVStore.RegisterFlagsWithPrefix(flagsPrefix+"ring.", storePrefix, f.ToFlagSet()) - f.DurationVar(&cfg.HeartbeatPeriod, flagsPrefix+"ring.heartbeat-period", 15*time.Second, "Period at which to heartbeat to the ring. 0 = disabled.") + f.DurationVar(&cfg.HeartbeatPeriod, flagsPrefix+"ring.heartbeat-period", 15*time.Second, "Period at which to heartbeat to the ring.") f.DurationVar(&cfg.HeartbeatTimeout, flagsPrefix+"ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which compactors are considered unhealthy within the ring. 0 = never (timeout disabled).") f.StringVar(&cfg.TokensFilePath, flagsPrefix+"ring.tokens-file-path", "", "File path where tokens are stored. If empty, tokens are not stored at shutdown and restored at startup.") f.BoolVar(&cfg.ZoneAwarenessEnabled, flagsPrefix+"ring.zone-awareness-enabled", false, "True to enable zone-awareness and replicate blocks across different availability zones.") diff --git a/vendor/github.com/grafana/dskit/clusterutil/cluster_validation_config.go b/vendor/github.com/grafana/dskit/clusterutil/cluster_validation_config.go index c0563db5f45dc..143004537cdf4 100644 --- a/vendor/github.com/grafana/dskit/clusterutil/cluster_validation_config.go +++ b/vendor/github.com/grafana/dskit/clusterutil/cluster_validation_config.go @@ -14,7 +14,7 @@ type ClusterValidationConfig struct { func (cfg *ClusterValidationConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { cfg.registeredFlags = flagext.TrackRegisteredFlags(prefix, f, func(prefix string, f *flag.FlagSet) { - f.StringVar(&cfg.Label, prefix+"label", "", "Optionally define the cluster validation label.") + f.StringVar(&cfg.Label, prefix+"label", "", "Primary cluster validation label.") }) } @@ -24,12 +24,35 @@ func (cfg *ClusterValidationConfig) RegisteredFlags() flagext.RegisteredFlags { type ServerClusterValidationConfig struct { ClusterValidationConfig `yaml:",inline"` + AdditionalLabels flagext.StringSliceCSV `yaml:"additional_labels" category:"experimental"` GRPC ClusterValidationProtocolConfig `yaml:"grpc" category:"experimental"` HTTP ClusterValidationProtocolConfigForHTTP `yaml:"http" category:"experimental"` registeredFlags flagext.RegisteredFlags `yaml:"-"` } +// GetAllowedClusterLabels returns the effective cluster validation labels. +// It combines the primary Label with any AdditionalLabels. +// The primary Label is always first if present, followed by AdditionalLabels. 
+func (cfg *ServerClusterValidationConfig) GetAllowedClusterLabels() []string { + if cfg.Label == "" && len(cfg.AdditionalLabels) == 0 { + return nil + } + + var labels []string + if cfg.Label != "" { + labels = append(labels, cfg.Label) + } + labels = append(labels, cfg.AdditionalLabels...) + return labels +} + func (cfg *ServerClusterValidationConfig) Validate() error { + // Validate that additional labels are only set when the primary label is set. + if len(cfg.AdditionalLabels) > 0 && cfg.Label == "" { + return fmt.Errorf("additional cluster validation labels require the primary label to be set") + } + + // Protocol validation only checks against the primary label. err := cfg.GRPC.Validate("grpc", cfg.Label) if err != nil { return err @@ -40,6 +63,7 @@ func (cfg *ServerClusterValidationConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { cfg.registeredFlags = flagext.TrackRegisteredFlags(prefix, f, func(prefix string, f *flag.FlagSet) { cfg.ClusterValidationConfig.RegisterFlagsWithPrefix(prefix, f) + f.Var(&cfg.AdditionalLabels, prefix+"additional-labels", "Comma-separated list of additional cluster validation labels that the server will accept from incoming requests.") cfg.GRPC.RegisterFlagsWithPrefix(prefix+"grpc.", f) cfg.HTTP.RegisterFlagsWithPrefix(prefix+"http.", f) }) diff --git a/vendor/github.com/grafana/dskit/clusterutil/clusterutil.go b/vendor/github.com/grafana/dskit/clusterutil/clusterutil.go index 40aa07f498344..6a5ecc0c4c940 100644 --- a/vendor/github.com/grafana/dskit/clusterutil/clusterutil.go +++ b/vendor/github.com/grafana/dskit/clusterutil/clusterutil.go @@ -77,3 +77,14 @@ func GetClusterFromRequest(req *http.Request) (string, error) { } return clusterIDs[0], nil } + +// IsClusterAllowed checks if the provided cluster is in the list of allowed clusters. +// Returns true if the cluster is found in the allowedClusters slice. +func IsClusterAllowed(cluster string, allowedClusters []string) bool { + for _, allowed := range allowedClusters { + if cluster == allowed { + return true + } + } + return false +} diff --git a/vendor/github.com/grafana/dskit/dns/miekgdns/resolver.go b/vendor/github.com/grafana/dskit/dns/miekgdns/resolver.go index 73ee46c855729..ad35b379b555c 100644 --- a/vendor/github.com/grafana/dskit/dns/miekgdns/resolver.go +++ b/vendor/github.com/grafana/dskit/dns/miekgdns/resolver.go @@ -128,7 +128,13 @@ func (r *Resolver) lookupSRV(ctx context.Context, conf *dns.ClientConfig, servic Port: addr.Port, }) default: - return "", nil, fmt.Errorf("invalid SRV response record %s", record) + // Ignore unexpected non-SRV responses. This matches the behavior of the + // built-in Go resolver. See https://github.com/grafana/mimir/issues/12713 + level.Debug(r.logger).Log( + "msg", "unexpected non-SRV response record", + "target", target, + "record", record, + ) } } diff --git a/vendor/github.com/grafana/dskit/grpcclient/grpcclient.go b/vendor/github.com/grafana/dskit/grpcclient/grpcclient.go index a49b3aea976b4..a7d3530e155d8 100644 --- a/vendor/github.com/grafana/dskit/grpcclient/grpcclient.go +++ b/vendor/github.com/grafana/dskit/grpcclient/grpcclient.go @@ -147,6 +147,7 @@ func (cfg *Config) DialOption(unaryClientInterceptors []grpc.UnaryClientIntercep // If cluster validation is enabled, ClusterUnaryClientInterceptor must be the last UnaryClientInterceptor // to wrap the real call.
if cfg.ClusterValidation.Label != "" { + // On the client side, we use the primary label as our cluster identity. cfg.clusterUnaryClientInterceptor = middleware.ClusterUnaryClientInterceptor(cfg.ClusterValidation.Label, invalidClusterValidationReporter) unaryClientInterceptors = append(unaryClientInterceptors, cfg.clusterUnaryClientInterceptor) } diff --git a/vendor/github.com/grafana/dskit/kv/memberlist/DESIGN.md b/vendor/github.com/grafana/dskit/kv/memberlist/DESIGN.md new file mode 100644 index 0000000000000..74ed258f975cc --- /dev/null +++ b/vendor/github.com/grafana/dskit/kv/memberlist/DESIGN.md @@ -0,0 +1,49 @@ +# Memberlist + +## Zone-aware routing + +Memberlist zone-aware routing is an optional feature that makes it possible to significantly reduce cross-AZ data transfer. + +When the feature is enabled, memberlist nodes can have one of these roles: + +- `member` +- `bridge` + +A **member** is a normal application instance. +A member only gossips and pushes/pulls to other nodes (members and bridges) in the same zone. +This means that the bulk of data transfer done by the memberlist client running in a member node stays within the zone. + +A **bridge** is a special application instance that acts as a bridge between multiple zones. +A bridge can gossip and push/pull to other nodes (members and bridges) in the same zone, and to the bridges in other zones (but not to members in the other zones). +In this regard, a bridge is effectively the component that allows two different zones to communicate. + +To ensure that messages are propagated between zones, the bridge prefers to communicate with other bridges first: + +- Broadcast messages: out of N nodes to select for broadcasting a message, the bridge always selects at least 1 node from the pool of bridges in the other zones, and then selects the remaining N-1 nodes randomly across the pool of nodes in the same zone + bridges in the other zones. +- Push/pull syncs: for the push/pull operations initiated by the bridge itself, the bridge always contacts a random bridge in other zones. The only exception is if there are no bridges in other zones: in this case, the bridge will select a random node in its own zone. + +Node probes (health checks) can still cross the AZ boundaries, but these are typically a small fraction of the total data transfer done by memberlist.
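For concreteness, a minimal configuration sketch for a bridge node follows. The keys under `zone_aware_routing` match the YAML struct tags introduced by this change; the top-level `memberlist` key is an assumption here, since the embedding application decides where the memberlist `KVConfig` is mounted.

```yaml
# Hypothetical mount point: the embedding application decides the top-level key.
memberlist:
  zone_aware_routing:
    enabled: true
    # Required when enabled; at most 16 bytes.
    instance_availability_zone: zone-a
    # Valid values: member, bridge.
    role: bridge
```

The diagram below illustrates the resulting communication paths between members and bridges.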
+ +``` +┌───────────────────────┐ ┌────────────────────────┐ +│ │ │ │ +│ member-zone-a-1 │ │ member-zone-b-1 │ +│ ▲ ▲ │ │ ▲ ▲ │ +│ │ │ │ │ │ │ │ +│ │ │ │ │ │ │ │ +│ │ ▼ │ │ │ ▼ │ +│ member-zone-a-2 │ │ member-zone-b-2 │ +│ │ ▲ │ │ │ ▲ │ +│ │ │ │ │ │ │ │ +│ │ │ │ │ │ │ │ +│ ▼ ▼ │ │ ▼ ▼ │ +│ bridge-zone-a-1 ◄──│───│──► bridge-zone-b-1 │ +│ │ │ │ +└───────────────────────┘ └────────────────────────┘ + zone-a zone-b +``` + +The role and availability zone of each node are explicitly assigned using CLI flags, or their respective YAML configuration options: + +- `-memberlist.zone-aware-routing.instance-availability-zone` +- `-memberlist.zone-aware-routing.role` diff --git a/vendor/github.com/grafana/dskit/kv/memberlist/http_status_handler.go b/vendor/github.com/grafana/dskit/kv/memberlist/http_status_handler.go index 3ecd83e387b64..88b69cc647c43 100644 --- a/vendor/github.com/grafana/dskit/kv/memberlist/http_status_handler.go +++ b/vendor/github.com/grafana/dskit/kv/memberlist/http_status_handler.go @@ -29,6 +29,7 @@ type StatusPageData struct { MessageHistoryBufferBytes int SentMessages []Message ReceivedMessages []Message + ZoneAwareRouting ZoneAwareRoutingConfig } // NewHTTPStatusHandler creates a new HTTPStatusHandler that will render the provided template using the data from StatusPageData. @@ -108,6 +109,7 @@ func (h HTTPStatusHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { MessageHistoryBufferBytes: kv.cfg.MessageHistoryBufferBytes, SentMessages: sent, ReceivedMessages: received, + ZoneAwareRouting: kv.cfg.ZoneAwareRouting, } accept := req.Header.Get("Accept") @@ -216,4 +218,10 @@ func downloadKey(w http.ResponseWriter, kv *KV, store map[string]ValueDesc, key var defaultPageContent string var defaultPageTemplate = template.Must(template.New("webpage").Funcs(template.FuncMap{ "StringsJoin": strings.Join, + "GetZoneFromMeta": func(meta []byte) string { + return EncodedNodeMetadata(meta).Zone() + }, + "GetRoleFromMeta": func(meta []byte) string { + return EncodedNodeMetadata(meta).Role().String() + }, }).Parse(defaultPageContent)) diff --git a/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go b/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go index da1a0c10ffe8c..7ebb643738cbb 100644 --- a/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go +++ b/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go @@ -178,6 +178,9 @@ type KVConfig struct { TCPTransport TCPTransportConfig `yaml:",inline"` + // Zone-aware routing configuration. + ZoneAwareRouting ZoneAwareRoutingConfig `yaml:"zone_aware_routing"` + MetricsNamespace string `yaml:"-"` // Codecs to register. Codecs need to be registered before joining other members. @@ -226,6 +229,7 @@ func (cfg *KVConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { f.IntVar(&cfg.WatchPrefixBufferSize, prefix+"memberlist.watch-prefix-buffer-size", watchPrefixBufferSize, "Size of the buffered channel for the WatchPrefix function.") cfg.TCPTransport.RegisterFlagsWithPrefix(f, prefix) + cfg.ZoneAwareRouting.RegisterFlagsWithPrefix(f, prefix+"memberlist.zone-aware-routing.") cfg.discoverMembersBackoff = backoff.Config{ MinBackoff: 100 * time.Millisecond, @@ -238,6 +242,11 @@ func (cfg *KVConfig) RegisterFlags(f *flag.FlagSet) { cfg.RegisterFlagsWithPrefix(f, "") } +// Validate validates the KV configuration. 
+func (cfg *KVConfig) Validate() error { + return cfg.ZoneAwareRouting.Validate() +} + func generateRandomSuffix(logger log.Logger) string { suffix := make([]byte, 4) _, err := crypto_rand.Read(suffix) @@ -269,6 +278,9 @@ type KV struct { localBroadcasts *memberlist.TransmitLimitedQueue // queue for messages generated locally gossipBroadcasts *memberlist.TransmitLimitedQueue // queue for messages that we forward from other nodes + // Node metadata for zone-aware routing (nil if zone-aware routing is disabled). + nodeMeta []byte + // KV Store. storeMu sync.RWMutex store map[string]ValueDesc @@ -481,11 +493,63 @@ func (m *KV) buildMemberlistConfig() (*memberlist.Config, error) { // node, because the TCP-based fallback will always trigger. mlCfg.DisableTcpPings = true + // Configure zone-aware routing if enabled. + if m.cfg.ZoneAwareRouting.Enabled { + if err := m.configureZoneAwareRouting(mlCfg); err != nil { + return nil, fmt.Errorf("failed to configure zone-aware routing: %w", err) + } + } + level.Info(m.logger).Log("msg", "Using memberlist cluster label and node name", "cluster_label", mlCfg.Label, "node", mlCfg.Name) return mlCfg, nil } +// configureZoneAwareRouting configures zone-aware routing for memberlist. +func (m *KV) configureZoneAwareRouting(mlCfg *memberlist.Config) error { + // Parse the role from the config string. + var role NodeRole + switch m.cfg.ZoneAwareRouting.Role { + case NodeRoleMember.String(): + role = NodeRoleMember + case NodeRoleBridge.String(): + role = NodeRoleBridge + default: + return fmt.Errorf("invalid zone-aware routing role: %s (valid values: %s, %s)", m.cfg.ZoneAwareRouting.Role, NodeRoleMember.String(), NodeRoleBridge.String()) + } + + // Encode the local node metadata. + localMeta, err := EncodeNodeMetadata(role, m.cfg.ZoneAwareRouting.Zone) + if err != nil { + return fmt.Errorf("failed to encode node metadata: %w", err) + } + + // Store the encoded metadata so NodeMeta() can return it. + m.nodeMeta = localMeta + + // Set up the node selection delegate. + mlCfg.NodeSelection = newZoneAwareNodeSelectionDelegate(role, m.cfg.ZoneAwareRouting.Zone, m.logger, m.registerer) + + // The bridge always prefers another bridge as the first node. If the bridge only pushes/pulls to 1 node per interval, then + // it will only communicate with bridges, potentially leading to network partitioning if the gossiping is not + // working to propagate changes. To reduce the likelihood of network partitioning when gossiping is not + // working and periodic push/pull is enabled, we configure the bridge to push/pull to 2 nodes per interval + // (the first node is a bridge, and the second node is selected randomly). + if role == NodeRoleBridge { + mlCfg.PushPullNodes = 2 + } else { + mlCfg.PushPullNodes = 1 + } + + level.Info(m.logger).Log( + "msg", "zone-aware routing enabled", + "zone", m.cfg.ZoneAwareRouting.Zone, + "role", role.String(), + ) + + return nil +} + func (m *KV) starting(ctx context.Context) error { mlCfg, err := m.buildMemberlistConfig() if err != nil { @@ -1295,9 +1359,9 @@ func (m *KV) broadcastNewValue(key string, change Mergeable, version uint, codec // NodeMeta is method from Memberlist Delegate interface func (m *KV) NodeMeta(_ int) []byte { - // we can send local state from here (512 bytes only) - // if state is updated, we need to tell memberlist to distribute it. - return nil + // Return the encoded node metadata if zone-aware routing is enabled. + // Otherwise, return nil (no metadata).
+ return m.nodeMeta } // NotifyMsg is method from Memberlist Delegate interface diff --git a/vendor/github.com/grafana/dskit/kv/memberlist/node_meta.go b/vendor/github.com/grafana/dskit/kv/memberlist/node_meta.go new file mode 100644 index 0000000000000..d8e9882df69ff --- /dev/null +++ b/vendor/github.com/grafana/dskit/kv/memberlist/node_meta.go @@ -0,0 +1,82 @@ +package memberlist + +import ( + "fmt" + "unsafe" +) + +// EncodedNodeMetadata is the encoded binary representation of node metadata. +// It provides zero-allocation accessor methods to read role and zone. +type EncodedNodeMetadata []byte + +const ( + // Minimum metadata size for version 1 (version + role + zone_len with empty zone). + minMetadataSizeVersion1 = 3 +) + +// EncodeNodeMetadata encodes node metadata into a compact binary format. +// The encoding is versioned to allow future extensions while maintaining backward compatibility. +// +// We use a custom compact encoding rather than protobuf because memberlist has strict size limits +// for node metadata (typically 512 bytes) and this data is broadcast frequently in alive messages. +// +// Version 1 format: +// - Byte 0: Version (1 byte) +// - Byte 1: Role (1 byte): 1=member, 2=bridge +// - Byte 2: Zone length (1 byte): length of zone string (0-16) +// - Bytes 3+: Zone string (UTF-8, variable length) +// +// The maximum metadata size is 19 bytes (1 + 1 + 1 + 16). +func EncodeNodeMetadata(role NodeRole, zone string) ([]byte, error) { + // Validate zone name length. + zoneLen := len(zone) + if zoneLen > MaxZoneNameLength { + return nil, fmt.Errorf("zone name too long: %d bytes (max %d)", zoneLen, MaxZoneNameLength) + } + + // Allocate buffer: version(1) + role(1) + zone_len(1) + zone(variable). + buf := make([]byte, 3+zoneLen) + + // Encode version. + buf[0] = 1 + + // Encode role. + buf[1] = uint8(role) + + // Encode zone length. + buf[2] = uint8(zoneLen) + + // Encode zone string. + copy(buf[3:], zone) + + return buf, nil +} + +// Role returns the node role with zero allocations. +// Returns NodeRoleMember if the metadata is invalid. +func (e EncodedNodeMetadata) Role() NodeRole { + if len(e) < minMetadataSizeVersion1 || e[0] != 1 { + return NodeRoleMember + } + return NodeRole(e[1]) +} + +// Zone returns the zone name as a string with zero allocations. +// Returns empty string if the metadata is invalid. +// The returned string shares the underlying byte slice, so it's valid +// as long as the EncodedNodeMetadata is not modified. +func (e EncodedNodeMetadata) Zone() string { + if len(e) < minMetadataSizeVersion1 || e[0] != 1 { + return "" + } + zoneLen := int(e[2]) + if len(e) < 3+zoneLen { + return "" + } + if zoneLen == 0 { + return "" + } + // Zero-allocation conversion from []byte to string. + // Safe because we're not modifying the underlying data. + return unsafe.String(&e[3], zoneLen) +} diff --git a/vendor/github.com/grafana/dskit/kv/memberlist/node_zone_aware_routing.go b/vendor/github.com/grafana/dskit/kv/memberlist/node_zone_aware_routing.go new file mode 100644 index 0000000000000..158ba109b906d --- /dev/null +++ b/vendor/github.com/grafana/dskit/kv/memberlist/node_zone_aware_routing.go @@ -0,0 +1,242 @@ +package memberlist + +import ( + "flag" + "fmt" + "math/rand" + "slices" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/hashicorp/memberlist" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +// NodeRole represents the role of a node in the memberlist cluster. 
+type NodeRole uint8 + +const ( + // NodeRoleMember represents a standard member node. + NodeRoleMember NodeRole = 1 + // NodeRoleBridge represents a bridge node that connects different zones. + NodeRoleBridge NodeRole = 2 +) + +// String returns the string representation of the node role. +func (r NodeRole) String() string { + switch r { + case NodeRoleMember: + return "member" + case NodeRoleBridge: + return "bridge" + default: + return fmt.Sprintf("unknown(%d)", r) + } +} + +const ( + // MaxZoneNameLength is the maximum zone name length (to keep metadata compact). + MaxZoneNameLength = 16 + + // Role configuration values. + roleConfigMember = "member" + roleConfigBridge = "bridge" +) + +// ZoneAwareRoutingConfig holds configuration for zone-aware routing in memberlist. +type ZoneAwareRoutingConfig struct { + Enabled bool `yaml:"enabled" category:"experimental"` + Zone string `yaml:"instance_availability_zone" category:"experimental"` + Role string `yaml:"role" category:"experimental"` +} + +// RegisterFlagsWithPrefix registers flags with the given prefix. +func (cfg *ZoneAwareRoutingConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { + f.BoolVar(&cfg.Enabled, prefix+"enabled", false, "Enable zone-aware routing for memberlist gossip.") + f.StringVar(&cfg.Zone, prefix+"instance-availability-zone", "", "Availability zone where this node is running.") + f.StringVar(&cfg.Role, prefix+"role", roleConfigMember, fmt.Sprintf("Role of this node in the cluster. Valid values: %s, %s.", roleConfigMember, roleConfigBridge)) +} + +// Validate validates the zone-aware routing configuration. +func (cfg *ZoneAwareRoutingConfig) Validate() error { + // Only validate if enabled. + if !cfg.Enabled { + return nil + } + + // Zone must be set. + if cfg.Zone == "" { + return fmt.Errorf("zone-aware routing is enabled but zone is not set") + } + + // Zone length must not exceed maximum. + if len(cfg.Zone) > MaxZoneNameLength { + return fmt.Errorf("zone name too long: %d bytes (max %d)", len(cfg.Zone), MaxZoneNameLength) + } + + // Role must be valid. + if cfg.Role != NodeRoleMember.String() && cfg.Role != NodeRoleBridge.String() { + return fmt.Errorf("invalid role: %s (valid values: %s, %s)", cfg.Role, NodeRoleMember.String(), NodeRoleBridge.String()) + } + + return nil +} + +// zoneAwareNodeSelectionDelegate implements the memberlist.NodeSelectionDelegate interface +// to provide zone-aware routing for gossip, probing, and push/pull operations. +type zoneAwareNodeSelectionDelegate struct { + localRole NodeRole + localZone string + logger log.Logger + + // Metrics + selectNodesCalls prometheus.Counter + selectNodesCallsSkipped prometheus.Counter +} + +// newZoneAwareNodeSelectionDelegate creates a new zone-aware node selection delegate. 
+func newZoneAwareNodeSelectionDelegate(localRole NodeRole, localZone string, logger log.Logger, registerer prometheus.Registerer) *zoneAwareNodeSelectionDelegate { + return &zoneAwareNodeSelectionDelegate{ + localRole: localRole, + localZone: localZone, + logger: logger, + selectNodesCalls: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ + Name: "memberlist_client_zone_aware_routing_select_nodes_total", + Help: "Total number of times memberlist attempted to select node candidates for gossiping (tracked only when zone-aware routing is enabled).", + }), + selectNodesCallsSkipped: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ + Name: "memberlist_client_zone_aware_routing_select_nodes_skipped_total", + Help: "Total number of times memberlist zone-aware routing was skipped because the local zone is unknown or a zone has no alive bridges.", + }), + } +} + +// SelectNodes implements memberlist.NodeSelectionDelegate. +// It determines which remote nodes should be selected for gossip operations and which one should be preferred. +func (d *zoneAwareNodeSelectionDelegate) SelectNodes(nodes []*memberlist.NodeState) (selected []*memberlist.NodeState, preferred *memberlist.NodeState) { + d.selectNodesCalls.Inc() + + if d.localRole != NodeRoleMember && d.localRole != NodeRoleBridge { + level.Warn(d.logger).Log("msg", "memberlist zone-aware routing is running with an unknown role", "role", d.localRole) + } + + // Skip zone-aware routing if local zone is not set. + if d.localZone == "" { + d.selectNodesCallsSkipped.Inc() + return nodes, nil + } + + // Pre-allocate backing arrays on the stack for up to 5 zones (common case). + zonesWithMembers := make([]string, 0, 5) + zonesWithAliveBridges := make([]string, 0, 5) + + // Build selected slice and track zones in a single pass. + selected = make([]*memberlist.NodeState, 0, len(nodes)) + preferredCount := 0 // Count of preferred candidates seen (for reservoir sampling). + + for _, node := range nodes { + remoteMeta := EncodedNodeMetadata(node.Meta) + remoteZone := remoteMeta.Zone() + remoteRole := remoteMeta.Role() + + // Track zones to check if any zone has members but no alive bridges. + if remoteZone != "" { + if remoteRole == NodeRoleBridge { + // Only count alive bridges. + if node.State == memberlist.StateAlive { + if !containsZone(zonesWithAliveBridges, remoteZone) { + zonesWithAliveBridges = append(zonesWithAliveBridges, remoteZone) + slices.Sort(zonesWithAliveBridges) + } + } + } else { + if !containsZone(zonesWithMembers, remoteZone) { + zonesWithMembers = append(zonesWithMembers, remoteZone) + slices.Sort(zonesWithMembers) + } + } + } + + // Apply zone-aware selection. + isSelected, isPreferred := d.selectNode(remoteZone, remoteRole) + if isSelected { + selected = append(selected, node) + if isPreferred { + preferredCount++ + // Reservoir sampling: select this node with a probability of 1/preferredCount. + if rand.Intn(preferredCount) == 0 { + preferred = node + } + } + } + } + + // Skip zone-aware routing if any zone has members but no alive bridges. + // This prevents network partitioning when bridges are missing or dead.
+ for _, zone := range zonesWithMembers { + if !slices.Contains(zonesWithAliveBridges, zone) { + d.selectNodesCallsSkipped.Inc() + level.Warn(d.logger).Log("msg", "memberlist zone-aware routing is skipped because a zone has no alive bridge", "zone", zone) + return nodes, nil + } + } + + return selected, preferred +} + +// selectNode determines whether a remote node should be selected for gossip operations +// and whether it should be considered a preferred candidate. +func (d *zoneAwareNodeSelectionDelegate) selectNode(remoteZone string, remoteRole NodeRole) (selected, preferredCandidate bool) { + // If the remote zone is unknown, select the node but don't prefer it. + // This prevents network partitioning: if every other memberlist node filters it out, then that + // remote node would not receive updates and would get isolated. + if remoteZone == "" { + return true, false + } + + switch d.localRole { + case NodeRoleMember: + // Members only select nodes in the same zone. + if remoteZone == d.localZone { + return true, false + } + return false, false + + case NodeRoleBridge: + // Bridges select nodes in the same zone + bridge nodes in other zones. + if remoteZone == d.localZone { + // Same zone: select but don't prefer. + return true, false + } + // Different zone: only select if it's a bridge node, and prefer it. + if remoteRole == NodeRoleBridge { + return true, true + } + return false, false + + default: + // Unknown role: select but don't prefer (should never happen). + return true, false + } +} + +// containsZone checks whether the zones slice contains a given zone. +// It's optimized for a sorted zones slice and zone names that end with 'a' to 'z'. +// When all zones from 'a' to the last one are present, it attempts to check only the expected position. +func containsZone(zones []string, zone string) bool { + if zone == "" { + return slices.Contains(zones, zone) + } + optimisticIndex := int(zone[len(zone)-1]) - 'a' + if optimisticIndex < 0 || len(zones) <= optimisticIndex { + return slices.Contains(zones, zone) + } + + if zones[optimisticIndex] == zone { + return true // Here's where we skip the strings check. + } + + // Bad luck, just check slices.Contains. + return slices.Contains(zones, zone) +} diff --git a/vendor/github.com/grafana/dskit/kv/memberlist/status.gohtml b/vendor/github.com/grafana/dskit/kv/memberlist/status.gohtml index 524acb80900da..1b820ac1f1631 100644 --- a/vendor/github.com/grafana/dskit/kv/memberlist/status.gohtml +++ b/vendor/github.com/grafana/dskit/kv/memberlist/status.gohtml @@ -12,6 +12,7 @@
  • Health Score: {{ .Memberlist.GetHealthScore }} (lower = better, 0 = healthy)
  • Members: {{ .Memberlist.NumMembers }}
+ • Zone-aware routing: {{ if .ZoneAwareRouting.Enabled }}enabled{{ else }}disabled{{ end }}

KV Store

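As context for the Zone and Role columns added in the next hunk: the `GetZoneFromMeta` and `GetRoleFromMeta` template helpers decode the compact metadata format defined in `node_meta.go`. A minimal round-trip sketch, assuming the vendored package is imported under its usual dskit path:

```go
package main

import (
	"fmt"

	"github.com/grafana/dskit/kv/memberlist"
)

func main() {
	// Encode: version(1) + role(1) + zone_len(1) + zone bytes.
	meta, err := memberlist.EncodeNodeMetadata(memberlist.NodeRoleBridge, "zone-a")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(meta)) // 9: 3-byte header + len("zone-a")

	// Decode with the zero-allocation accessors the status page helpers use.
	enc := memberlist.EncodedNodeMetadata(meta)
	fmt.Println(enc.Zone(), enc.Role()) // zone-a bridge
}
```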
@@ -58,6 +59,10 @@ Name Address State + {{ if .ZoneAwareRouting.Enabled }} + Zone + Role + {{ end }} @@ -67,6 +72,10 @@ {{ .Name }} {{ .Address }} {{ .State }} + {{ if $.ZoneAwareRouting.Enabled }} + {{ GetZoneFromMeta .Meta }} + {{ GetRoleFromMeta .Meta }} + {{ end }} {{ end }} diff --git a/vendor/github.com/grafana/dskit/kv/memberlist/tcp_transport.go b/vendor/github.com/grafana/dskit/kv/memberlist/tcp_transport.go index 241d25b717403..8777ca2bcb612 100644 --- a/vendor/github.com/grafana/dskit/kv/memberlist/tcp_transport.go +++ b/vendor/github.com/grafana/dskit/kv/memberlist/tcp_transport.go @@ -125,6 +125,9 @@ type TCPTransport struct { sentPacketsErrors prometheus.Counter droppedPackets prometheus.Counter unknownConnections prometheus.Counter + + sentBytes prometheus.Counter + receivedBytes prometheus.Counter } // NewTCPTransport returns a new tcp-based transport with the given configuration. On @@ -272,6 +275,9 @@ func (t *TCPTransport) debugLog() log.Logger { func (t *TCPTransport) handleConnection(conn net.Conn) { t.debugLog().Log("msg", "New connection", "addr", conn.RemoteAddr()) + // Wrap the connection to track sent/received bytes. + conn = newMeteredConn(conn, t.sentBytes, t.receivedBytes) + closeConn := true defer func() { if closeConn { @@ -364,10 +370,17 @@ func (a addr) String() string { } func (t *TCPTransport) getConnection(addr string, timeout time.Duration) (net.Conn, error) { + var conn net.Conn + var err error if t.cfg.TLSEnabled { - return tls.DialWithDialer(&net.Dialer{Timeout: timeout}, "tcp", addr, t.tlsConfig) + conn, err = tls.DialWithDialer(&net.Dialer{Timeout: timeout}, "tcp", addr, t.tlsConfig) + } else { + conn, err = net.DialTimeout("tcp", addr, timeout) } - return net.DialTimeout("tcp", addr, timeout) + if err != nil { + return nil, err + } + return newMeteredConn(conn, t.sentBytes, t.receivedBytes), nil } // GetAutoBindPort returns the bind port that was automatically given by the @@ -673,7 +686,7 @@ func (t *TCPTransport) registerMetrics(registerer prometheus.Registerer) { Namespace: t.cfg.MetricsNamespace, Subsystem: subsystem, Name: "packets_received_bytes_total", - Help: "Total bytes received as packets", + Help: "Total bytes received as packets. This metric only tracks broadcast packets, and does not include full state syncs or pings.", }) t.receivedPacketsErrors = promauto.With(registerer).NewCounter(prometheus.CounterOpts{ @@ -701,7 +714,7 @@ func (t *TCPTransport) registerMetrics(registerer prometheus.Registerer) { Namespace: t.cfg.MetricsNamespace, Subsystem: subsystem, Name: "packets_sent_bytes_total", - Help: "Total bytes sent as packets", + Help: "Total bytes sent as packets. This metric only tracks broadcast packets, and does not include full state syncs or pings.", }) t.sentPacketsErrors = promauto.With(registerer).NewCounter(prometheus.CounterOpts{ @@ -717,4 +730,18 @@ func (t *TCPTransport) registerMetrics(registerer prometheus.Registerer) { Name: "unknown_connections_total", Help: "Number of unknown TCP connections (not a packet or stream)", }) + + t.sentBytes = promauto.With(registerer).NewCounter(prometheus.CounterOpts{ + Namespace: t.cfg.MetricsNamespace, + Subsystem: subsystem, + Name: "sent_bytes_total", + Help: "Total bytes sent by the transport. 
This metric tracks all data transferred, including broadcast packets, full state syncs, and pings.", + }) + + t.receivedBytes = promauto.With(registerer).NewCounter(prometheus.CounterOpts{ + Namespace: t.cfg.MetricsNamespace, + Subsystem: subsystem, + Name: "received_bytes_total", + Help: "Total bytes received by the transport. This metric tracks all data transferred, including broadcast packets, full state syncs, and pings.", + }) } diff --git a/vendor/github.com/grafana/dskit/kv/memberlist/tcp_transport_conn.go b/vendor/github.com/grafana/dskit/kv/memberlist/tcp_transport_conn.go new file mode 100644 index 0000000000000..51752af55992d --- /dev/null +++ b/vendor/github.com/grafana/dskit/kv/memberlist/tcp_transport_conn.go @@ -0,0 +1,104 @@ +package memberlist + +import ( + "net" + "time" + + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/atomic" +) + +const ( + // flushThreshold is the number of bytes after which we flush the local counters + // to the Prometheus counters. This reduces the frequency of expensive Prometheus + // counter updates. + flushThreshold = 64 * 1024 // 64KB +) + +// meteredConn wraps a net.Conn to track the number of bytes sent and received. +// It periodically flushes the byte counts to Prometheus counters when a threshold +// is reached, and always flushes on Close() to ensure accurate tracking for +// short-lived connections. +type meteredConn struct { + net.Conn + + sentBytesCounter prometheus.Counter + receivedBytesCounter prometheus.Counter + + sentBytesLocal atomic.Int64 + receivedBytesLocal atomic.Int64 +} + +// newMeteredConn creates a new metered connection wrapper. +func newMeteredConn(conn net.Conn, sentBytes, receivedBytes prometheus.Counter) *meteredConn { + return &meteredConn{ + Conn: conn, + sentBytesCounter: sentBytes, + receivedBytesCounter: receivedBytes, + } +} + +// Read implements net.Conn. +func (c *meteredConn) Read(b []byte) (n int, err error) { + n, err = c.Conn.Read(b) + if n > 0 { + c.addReceivedBytes(int64(n)) + } + return n, err +} + +// Write implements net.Conn. +func (c *meteredConn) Write(b []byte) (n int, err error) { + n, err = c.Conn.Write(b) + if n > 0 { + c.addSentBytes(int64(n)) + } + return n, err +} + +// Close implements net.Conn. +func (c *meteredConn) Close() error { + c.flush() + return c.Conn.Close() +} + +// SetDeadline implements net.Conn. +func (c *meteredConn) SetDeadline(t time.Time) error { + return c.Conn.SetDeadline(t) +} + +// SetReadDeadline implements net.Conn. +func (c *meteredConn) SetReadDeadline(t time.Time) error { + return c.Conn.SetReadDeadline(t) +} + +// SetWriteDeadline implements net.Conn. +func (c *meteredConn) SetWriteDeadline(t time.Time) error { + return c.Conn.SetWriteDeadline(t) +} + +// addSentBytes adds bytes to the local sent counter and flushes to Prometheus if threshold is reached. +func (c *meteredConn) addSentBytes(n int64) { + newTotal := c.sentBytesLocal.Add(n) + if newTotal >= flushThreshold { + c.flush() + } +} + +// addReceivedBytes adds bytes to the local received counter and flushes to Prometheus if threshold is reached. +func (c *meteredConn) addReceivedBytes(n int64) { + newTotal := c.receivedBytesLocal.Add(n) + if newTotal >= flushThreshold { + c.flush() + } +} + +// flush flushes any remaining local byte counters to the Prometheus counters.
+func (c *meteredConn) flush() { + if sentBytes := c.sentBytesLocal.Swap(0); sentBytes > 0 { + c.sentBytesCounter.Add(float64(sentBytes)) + } + if receivedBytes := c.receivedBytesLocal.Swap(0); receivedBytes > 0 { + c.receivedBytesCounter.Add(float64(receivedBytes)) + } +} diff --git a/vendor/github.com/grafana/dskit/kv/multi.go b/vendor/github.com/grafana/dskit/kv/multi.go index e1e461ea1f281..81e27db7619aa 100644 --- a/vendor/github.com/grafana/dskit/kv/multi.go +++ b/vendor/github.com/grafana/dskit/kv/multi.go @@ -30,8 +30,8 @@ type MultiConfig struct { func (cfg *MultiConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { f.StringVar(&cfg.Primary, prefix+"multi.primary", "", "Primary backend storage used by multi-client.") f.StringVar(&cfg.Secondary, prefix+"multi.secondary", "", "Secondary backend storage used by multi-client.") - f.BoolVar(&cfg.MirrorEnabled, prefix+"multi.mirror-enabled", false, "Mirror writes to secondary store.") - f.DurationVar(&cfg.MirrorTimeout, prefix+"multi.mirror-timeout", 2*time.Second, "Timeout for storing value to secondary store.") + f.BoolVar(&cfg.MirrorEnabled, prefix+"multi.mirror-enabled", false, "Mirror writes to the secondary store.") + f.DurationVar(&cfg.MirrorTimeout, prefix+"multi.mirror-timeout", 2*time.Second, "Timeout for storing a value to the secondary store.") } // MultiRuntimeConfig has values that can change in runtime (via overrides) diff --git a/vendor/github.com/grafana/dskit/middleware/grpc_cluster.go b/vendor/github.com/grafana/dskit/middleware/grpc_cluster.go index f7a701389d759..0c1d020c7177d 100644 --- a/vendor/github.com/grafana/dskit/middleware/grpc_cluster.go +++ b/vendor/github.com/grafana/dskit/middleware/grpc_cluster.go @@ -69,19 +69,19 @@ func handleClusterValidationError(err error, method string, invalidClusterValida } // ClusterUnaryServerInterceptor checks if the incoming gRPC metadata contains any cluster label and if so, checks if -// the latter corresponds to the given cluster label. If it is the case, the request is further propagated. -// If an empty cluster label or nil logger are provided, ClusterUnaryServerInterceptor panics. +// the latter corresponds to one of the given cluster labels. If it is the case, the request is further propagated. +// If empty cluster labels or nil logger are provided, ClusterUnaryServerInterceptor panics. // If the softValidation parameter is true, errors related to the cluster label validation are logged, but not returned. // Otherwise, an error is returned. -func ClusterUnaryServerInterceptor(cluster string, softValidation bool, invalidClusterRequests *prometheus.CounterVec, logger log.Logger) grpc.UnaryServerInterceptor { - validateClusterServerInterceptorInputParameters(cluster, logger) +func ClusterUnaryServerInterceptor(clusters []string, softValidation bool, invalidClusterRequests *prometheus.CounterVec, logger log.Logger) grpc.UnaryServerInterceptor { + validateClusterServerInterceptorInputParameters(clusters, logger) return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { // We skip the gRPC health check. 
if _, ok := info.Server.(healthpb.HealthServer); ok { return handler(ctx, req) } - if err := checkClusterFromIncomingContext(ctx, info.FullMethod, cluster, softValidation, invalidClusterRequests, logger); err != nil { + if err := checkClusterFromIncomingContext(ctx, info.FullMethod, clusters, softValidation, invalidClusterRequests, logger); err != nil { stat := grpcutil.Status(codes.FailedPrecondition, err.Error(), &grpcutil.ErrorDetails{Cause: grpcutil.WRONG_CLUSTER_VALIDATION_LABEL}) return nil, stat.Err() } @@ -89,9 +89,9 @@ func ClusterUnaryServerInterceptor(cluster string, softValidation bool, invalidC } } -func validateClusterServerInterceptorInputParameters(cluster string, logger log.Logger) { - if cluster == "" { - panic("no cluster label provided") +func validateClusterServerInterceptorInputParameters(clusters []string, logger log.Logger) { + if len(clusters) == 0 { + panic("no cluster labels provided") } if logger == nil { panic("no logger provided") @@ -99,18 +99,18 @@ func validateClusterServerInterceptorInputParameters(cluster string, logger log. } func checkClusterFromIncomingContext( - ctx context.Context, method string, expectedCluster string, softValidationEnabled bool, + ctx context.Context, method string, expectedClusters []string, softValidationEnabled bool, invalidClusterRequests *prometheus.CounterVec, logger log.Logger, ) error { reqCluster, err := clusterutil.GetClusterFromIncomingContext(ctx) - if err == nil && reqCluster == expectedCluster { + if err == nil && clusterutil.IsClusterAllowed(reqCluster, expectedClusters) { return nil } logger = log.With( logger, "method", method, - "cluster_validation_label", expectedCluster, + "cluster_validation_labels", fmt.Sprintf("%v", expectedClusters), "soft_validation", softValidationEnabled, ) if tenantID, err := user.ExtractOrgID(ctx); err == nil { @@ -127,10 +127,12 @@ func checkClusterFromIncomingContext( // No error, but request's and server's cluster validation labels didn't match. 
var wrongClusterErr error if !softValidationEnabled { - wrongClusterErr = fmt.Errorf("rejected request with wrong cluster validation label %q - it should be %q", reqCluster, expectedCluster) + wrongClusterErr = fmt.Errorf("rejected request with wrong cluster validation label %q - it should be one of %v", reqCluster, expectedClusters) } - invalidClusterRequests.WithLabelValues("grpc", method, expectedCluster, reqCluster).Inc() + // Use first expected cluster for metrics compatibility + expectedClusterForMetrics := expectedClusters[0] + invalidClusterRequests.WithLabelValues("grpc", method, expectedClusterForMetrics, reqCluster).Inc() level.Warn(logger).Log("msg", "request with wrong cluster validation label", "request_cluster_validation_label", reqCluster) return wrongClusterErr } @@ -138,10 +140,12 @@ func checkClusterFromIncomingContext( if errors.Is(err, clusterutil.ErrNoClusterValidationLabel) { var emptyClusterErr error if !softValidationEnabled { - emptyClusterErr = fmt.Errorf("rejected request with empty cluster validation label - it should be %q", expectedCluster) + emptyClusterErr = fmt.Errorf("rejected request with empty cluster validation label - it should be one of %v", expectedClusters) } - invalidClusterRequests.WithLabelValues("grpc", method, expectedCluster, "").Inc() + // Use first expected cluster for metrics compatibility + expectedClusterForMetrics := expectedClusters[0] + invalidClusterRequests.WithLabelValues("grpc", method, expectedClusterForMetrics, "").Inc() level.Warn(logger).Log("msg", "request with no cluster validation label") return emptyClusterErr } @@ -151,7 +155,9 @@ func checkClusterFromIncomingContext( rejectedRequestErr = fmt.Errorf("rejected request: %w", err) } - invalidClusterRequests.WithLabelValues("grpc", method, expectedCluster, "").Inc() + // Use first expected cluster for metrics compatibility + expectedClusterForMetrics := expectedClusters[0] + invalidClusterRequests.WithLabelValues("grpc", method, expectedClusterForMetrics, "").Inc() level.Warn(logger).Log("msg", "detected error during cluster validation label extraction", "err", err) return rejectedRequestErr } diff --git a/vendor/github.com/grafana/dskit/middleware/http_cluster.go b/vendor/github.com/grafana/dskit/middleware/http_cluster.go index 4578d36184c6b..3b1a042ecacff 100644 --- a/vendor/github.com/grafana/dskit/middleware/http_cluster.go +++ b/vendor/github.com/grafana/dskit/middleware/http_cluster.go @@ -81,14 +81,14 @@ func validateClusterValidationRoundTripperInputParameters(cluster string, invali } // ClusterValidationMiddleware validates that requests have the correct cluster validation label. -// If an empty cluster label or nil logger are provided, ClusterValidationMiddleware panics. +// If empty cluster labels or nil logger are provided, ClusterValidationMiddleware panics. // The check is ignored if the request's path belongs to the list of excluded paths or if the User-Agent matches any of the excluded user agents. // If the softValidation parameter is true, errors related to the cluster label validation are logged, but not returned. // Otherwise, an error is returned. 
func ClusterValidationMiddleware( - cluster string, cfg clusterutil.ClusterValidationProtocolConfigForHTTP, invalidClusterRequests *prometheus.CounterVec, logger log.Logger, + clusters []string, cfg clusterutil.ClusterValidationProtocolConfigForHTTP, invalidClusterRequests *prometheus.CounterVec, logger log.Logger, ) Interface { - validateClusterValidationMiddlewareInputParameters(cluster, logger) + validateClusterValidationMiddlewareInputParameters(clusters, logger) var reB strings.Builder // Allow for a potential path prefix being configured. reB.WriteString(".*/(metrics|debug/pprof.*|ready") @@ -119,7 +119,7 @@ func ClusterValidationMiddleware( if route == "" { route = "" } - if err := checkClusterFromRequest(r, cluster, route, cfg.SoftValidation, reExcludedPath, reExcludedUserAgent, invalidClusterRequests, logger); err != nil { + if err := checkClusterFromRequest(r, clusters, route, cfg.SoftValidation, reExcludedPath, reExcludedUserAgent, invalidClusterRequests, logger); err != nil { clusterValidationErr := clusterValidationError{ ClusterValidationErrorMessage: err.Error(), Route: route, @@ -132,9 +132,9 @@ func ClusterValidationMiddleware( }) } -func validateClusterValidationMiddlewareInputParameters(cluster string, logger log.Logger) { - if cluster == "" { - panic("no cluster label provided") +func validateClusterValidationMiddlewareInputParameters(clusters []string, logger log.Logger) { + if len(clusters) == 0 { + panic("no cluster labels provided") } if logger == nil { panic("no logger provided") @@ -142,7 +142,7 @@ func validateClusterValidationMiddlewareInputParameters(cluster string, logger l } func checkClusterFromRequest( - r *http.Request, expectedCluster, route string, softValidationEnabled bool, reExcludedPath *regexp.Regexp, reExcludedUserAgent *regexp.Regexp, + r *http.Request, expectedClusters []string, route string, softValidationEnabled bool, reExcludedPath *regexp.Regexp, reExcludedUserAgent *regexp.Regexp, invalidClusterRequests *prometheus.CounterVec, logger log.Logger, ) error { if reExcludedPath != nil && reExcludedPath.MatchString(r.URL.Path) { @@ -158,7 +158,7 @@ func checkClusterFromRequest( } reqCluster, err := clusterutil.GetClusterFromRequest(r) - if err == nil && reqCluster == expectedCluster { + if err == nil && clusterutil.IsClusterAllowed(reqCluster, expectedClusters) { return nil } @@ -166,7 +166,7 @@ func checkClusterFromRequest( logger, "path", r.URL.Path, "method", r.Method, - "cluster_validation_label", expectedCluster, + "cluster_validation_labels", fmt.Sprintf("%v", expectedClusters), "soft_validation", softValidationEnabled, "tenant", r.Header.Get(user.OrgIDHeaderName), "user_agent", r.Header.Get("User-Agent"), @@ -181,10 +181,12 @@ func checkClusterFromRequest( // No error, but request's and server's cluster validation labels didn't match. 
var wrongClusterErr error if !softValidationEnabled { - wrongClusterErr = fmt.Errorf("rejected request with wrong cluster validation label %q - it should be %q", reqCluster, expectedCluster) + wrongClusterErr = fmt.Errorf("rejected request with wrong cluster validation label %q - it should be one of %v", reqCluster, expectedClusters) } - invalidClusterRequests.WithLabelValues("http", route, expectedCluster, reqCluster).Inc() + // Use first expected cluster for metrics compatibility + expectedClusterForMetrics := expectedClusters[0] + invalidClusterRequests.WithLabelValues("http", route, expectedClusterForMetrics, reqCluster).Inc() level.Warn(logger).Log("msg", "request with wrong cluster validation label", "request_cluster_validation_label", reqCluster) return wrongClusterErr } @@ -192,10 +194,12 @@ func checkClusterFromRequest( if errors.Is(err, clusterutil.ErrNoClusterValidationLabelInHeader) { var emptyClusterErr error if !softValidationEnabled { - emptyClusterErr = fmt.Errorf("rejected request with empty cluster validation label - it should be %q", expectedCluster) + emptyClusterErr = fmt.Errorf("rejected request with empty cluster validation label - it should be one of %v", expectedClusters) } - invalidClusterRequests.WithLabelValues("http", route, expectedCluster, "").Inc() + // Use first expected cluster for metrics compatibility + expectedClusterForMetrics := expectedClusters[0] + invalidClusterRequests.WithLabelValues("http", route, expectedClusterForMetrics, "").Inc() level.Warn(logger).Log("msg", "request with no cluster validation label") return emptyClusterErr } @@ -205,7 +209,9 @@ func checkClusterFromRequest( rejectedRequestErr = fmt.Errorf("rejected request: %w", err) } - invalidClusterRequests.WithLabelValues("http", route, expectedCluster, "").Inc() + // Use first expected cluster for metrics compatibility + expectedClusterForMetrics := expectedClusters[0] + invalidClusterRequests.WithLabelValues("http", route, expectedClusterForMetrics, "").Inc() level.Warn(logger).Log("msg", "detected error during cluster validation label extraction", "err", err) return rejectedRequestErr } diff --git a/vendor/github.com/grafana/dskit/middleware/http_tracing.go b/vendor/github.com/grafana/dskit/middleware/http_tracing.go index ae947551c23c4..f11ce05952c54 100644 --- a/vendor/github.com/grafana/dskit/middleware/http_tracing.go +++ b/vendor/github.com/grafana/dskit/middleware/http_tracing.go @@ -16,7 +16,7 @@ import ( "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" // otelhttp uses semconv v1.20.0 so we stick to the same version in order to produce consistent attributes on HTTP and HTTPGRPC spans. + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // otelhttp uses semconv v1.37.0 so we stick to the same version in order to produce consistent attributes on HTTP and HTTPGRPC spans. 
"go.opentelemetry.io/otel/trace" "google.golang.org/grpc" @@ -254,9 +254,9 @@ func handleHTTPGRPCRequestWithOTel(ctx context.Context, req any, httpRequest *ht parentSpan := trace.SpanFromContext(ctx) if parentSpan.SpanContext().IsValid() { parentSpan.SetAttributes( - semconv.HTTPMethod(method), + semconv.HTTPRequestMethodKey.String(method), semconv.HTTPRouteKey.String(routeName), - attribute.String("http.target", urlPath), + attribute.String("url.path", urlPath), semconv.UserAgentOriginal(userAgent), ) } @@ -266,10 +266,10 @@ func handleHTTPGRPCRequestWithOTel(ctx context.Context, req any, httpRequest *ht startSpanOpts := []trace.SpanStartOption{ trace.WithSpanKind(trace.SpanKindServer), trace.WithAttributes( - semconv.HTTPMethod(method), + semconv.HTTPRequestMethodKey.String(method), semconv.HTTPRouteKey.String(routeName), semconv.UserAgentOriginal(userAgent), - attribute.String("http.target", urlPath), + attribute.String("url.path", urlPath), ), } diff --git a/vendor/github.com/grafana/dskit/middleware/route_injector.go b/vendor/github.com/grafana/dskit/middleware/route_injector.go index 7b275f74f7564..e4f2d8c3e65f0 100644 --- a/vendor/github.com/grafana/dskit/middleware/route_injector.go +++ b/vendor/github.com/grafana/dskit/middleware/route_injector.go @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-License-Identifier: Apache-2.0 package middleware diff --git a/vendor/github.com/grafana/dskit/ring/basic_lifecycler.go b/vendor/github.com/grafana/dskit/ring/basic_lifecycler.go index e32b20f0a035c..e84ccf0903c15 100644 --- a/vendor/github.com/grafana/dskit/ring/basic_lifecycler.go +++ b/vendor/github.com/grafana/dskit/ring/basic_lifecycler.go @@ -53,8 +53,6 @@ type BasicLifecyclerConfig struct { HeartbeatTimeout time.Duration TokensObservePeriod time.Duration NumTokens int - // HideTokensInStatusPage allows tokens to be hidden from management tools e.g. the status page, for use in contexts which do not utilize tokens. - HideTokensInStatusPage bool // If true lifecycler doesn't unregister instance from the ring when it's stopping. Default value is false, // which means unregistering. @@ -63,6 +61,11 @@ type BasicLifecyclerConfig struct { // If set, specifies the TokenGenerator implementation that will be used for generating tokens. // Default value is nil, which means that RandomTokenGenerator is used. RingTokenGenerator TokenGenerator + + // Versions are the component versions associated with this instance. + Versions InstanceVersions + + StatusPageConfig StatusPageConfig } /* @@ -345,7 +348,7 @@ func (l *BasicLifecycler) registerInstance(ctx context.Context) error { // Always overwrite the instance in the ring (even if already exists) because some properties // may have changed (stated, tokens, zone, address) and even if they didn't the heartbeat at // least did. - instanceDesc = ringDesc.AddIngester(l.cfg.ID, l.cfg.Addr, l.cfg.Zone, tokens, state, registeredAt, readOnly, readOnlyUpdatedTimestamp) + instanceDesc = ringDesc.AddIngester(l.cfg.ID, l.cfg.Addr, l.cfg.Zone, tokens, state, registeredAt, readOnly, readOnlyUpdatedTimestamp, l.cfg.Versions) return ringDesc, true, nil }) @@ -473,7 +476,7 @@ func (l *BasicLifecycler) updateInstance(ctx context.Context, update func(*Desc, // registration timestamp to current time. 
registeredAt := time.Now() readOnly, readOnlyUpdatedTimestamp := l.GetReadOnlyState() - instanceDesc = ringDesc.AddIngester(l.cfg.ID, l.cfg.Addr, l.cfg.Zone, l.GetTokens(), l.GetState(), registeredAt, readOnly, readOnlyUpdatedTimestamp) + instanceDesc = ringDesc.AddIngester(l.cfg.ID, l.cfg.Addr, l.cfg.Zone, l.GetTokens(), l.GetState(), registeredAt, readOnly, readOnlyUpdatedTimestamp, l.cfg.Versions) } prevTimestamp := instanceDesc.Timestamp @@ -594,5 +597,5 @@ func (l *BasicLifecycler) getRing(ctx context.Context) (*Desc, error) { } func (l *BasicLifecycler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - newRingPageHandler(l, l.cfg.HeartbeatTimeout, l.cfg.HideTokensInStatusPage).handle(w, req) + newRingPageHandler(l, l.cfg.HeartbeatTimeout, l.cfg.StatusPageConfig).handle(w, req) } diff --git a/vendor/github.com/grafana/dskit/ring/client/pool.go b/vendor/github.com/grafana/dskit/ring/client/pool.go index 4275259aa4580..8efbc89b6dd84 100644 --- a/vendor/github.com/grafana/dskit/ring/client/pool.go +++ b/vendor/github.com/grafana/dskit/ring/client/pool.go @@ -56,9 +56,18 @@ type PoolServiceDiscovery func() ([]string, error) // PoolConfig is config for creating a Pool. type PoolConfig struct { - CheckInterval time.Duration - HealthCheckEnabled bool - HealthCheckTimeout time.Duration + CheckInterval time.Duration + HealthCheckEnabled bool + HealthCheckTimeout time.Duration + + // HealthCheckGracePeriod is the duration during which the health check is allowed to fail before the client is removed + // from the pool. + // For example, if the grace period is 60s, and the health check consistently fails during this period, then the client + // will be removed from the pool at the end of this period. + // However, if the health check passes at any point during this period, then the grace period is reset and starts again + // when the health check next fails. + HealthCheckGracePeriod time.Duration + MaxConcurrentHealthChecks int // defaults to 16 } @@ -73,11 +82,16 @@ type Pool struct { clientName string sync.RWMutex - clients map[string]PoolClient + members map[string]*poolMember clientsMetric prometheus.Gauge } +type poolMember struct { + client PoolClient + firstFailedHealthCheck time.Time +} + // NewPool creates a new Pool. func NewPool(clientName string, cfg PoolConfig, discovery PoolServiceDiscovery, factory PoolFactory, clientsMetric prometheus.Gauge, logger log.Logger) *Pool { if cfg.MaxConcurrentHealthChecks == 0 { @@ -90,7 +104,7 @@ func NewPool(clientName string, cfg PoolConfig, discovery PoolServiceDiscovery, factory: factory, logger: logger, clientName: clientName, - clients: map[string]PoolClient{}, + members: map[string]*poolMember{}, clientsMetric: clientsMetric, } @@ -108,11 +122,11 @@ func (p *Pool) iteration(_ context.Context) error { return nil } -func (p *Pool) fromCache(addr string) (PoolClient, bool) { +func (p *Pool) fromCache(addr string) (*poolMember, bool) { p.RLock() defer p.RUnlock() - client, ok := p.clients[addr] - return client, ok + member, ok := p.members[addr] + return member, ok } // GetClientFor gets the client for the specified address. If it does not exist @@ -124,9 +138,9 @@ func (p *Pool) GetClientFor(addr string) (PoolClient, error) { // GetClientForInstance gets the client for the specified ring member. If it does not exist // it will make a new client for that instance. 
func (p *Pool) GetClientForInstance(inst ring.InstanceDesc) (PoolClient, error) { - client, ok := p.fromCache(inst.Addr) + member, ok := p.fromCache(inst.Addr) if ok { - return client, nil + return member.client, nil } // No client in cache so create one @@ -134,16 +148,16 @@ func (p *Pool) GetClientForInstance(inst ring.InstanceDesc) (PoolClient, error) defer p.Unlock() // Check if a client has been created just after checking the cache and before acquiring the lock. - client, ok = p.clients[inst.Addr] + member, ok = p.members[inst.Addr] if ok { - return client, nil + return member.client, nil } client, err := p.factory.FromInstance(inst) if err != nil { return nil, err } - p.clients[inst.Addr] = client + p.members[inst.Addr] = &poolMember{client: client} if p.clientsMetric != nil { p.clientsMetric.Add(1) } @@ -154,10 +168,10 @@ func (p *Pool) GetClientForInstance(inst ring.InstanceDesc) (PoolClient, error) func (p *Pool) RemoveClientFor(addr string) { p.Lock() defer p.Unlock() - client, ok := p.clients[addr] + member, ok := p.members[addr] if ok { - delete(p.clients, addr) - p.closeClient(addr, client) + delete(p.members, addr) + p.closeClient(addr, member.client) } } @@ -180,18 +194,24 @@ func (p *Pool) RemoveClient(client PoolClient, addr string) { p.Lock() defer p.Unlock() if addr != "" { - if p.clients[addr] != client { + member, ok := p.members[addr] + if !ok { return } - delete(p.clients, addr) + + if member.client != client { + return + } + + delete(p.members, addr) p.closeClient(addr, client) return } - for addr, cachedClient := range p.clients { - if cachedClient != client { + for addr, member := range p.members { + if member.client != client { continue } - delete(p.clients, addr) + delete(p.members, addr) p.closeClient(addr, client) return } @@ -202,7 +222,7 @@ func (p *Pool) RegisteredAddresses() []string { result := []string{} p.RLock() defer p.RUnlock() - for addr := range p.clients { + for addr := range p.members { result = append(result, addr) } return result @@ -212,7 +232,7 @@ func (p *Pool) RegisteredAddresses() []string { func (p *Pool) Count() int { p.RLock() defer p.RUnlock() - return len(p.clients) + return len(p.members) } func (p *Pool) removeStaleClients() { @@ -242,17 +262,41 @@ func (p *Pool) cleanUnhealthy() { addresses := p.RegisteredAddresses() _ = concurrency.ForEachJob(context.Background(), len(addresses), p.cfg.MaxConcurrentHealthChecks, func(ctx context.Context, idx int) error { addr := addresses[idx] - client, ok := p.fromCache(addr) + member, ok := p.fromCache(addr) // not ok means someone removed a client between the start of this loop and now - if ok { - err := healthCheck(ctx, client, p.cfg.HealthCheckTimeout) - if err != nil { - level.Warn(p.logger).Log("msg", fmt.Sprintf("removing %s failing healthcheck", p.clientName), "addr", addr, "reason", err) - p.RemoveClientFor(addr) - } + if !ok { + return nil } + + err := healthCheck(ctx, member.client, p.cfg.HealthCheckTimeout) + if err == nil { + member.firstFailedHealthCheck = time.Time{} + return nil + } + + if member.firstFailedHealthCheck.IsZero() { + member.firstFailedHealthCheck = time.Now() + } + + if time.Since(member.firstFailedHealthCheck) >= p.cfg.HealthCheckGracePeriod { + level.Warn(p.logger).Log( + "msg", fmt.Sprintf("removing %s failing healthcheck", p.clientName), + "addr", addr, + "reason", err, + "first_failed_at", member.firstFailedHealthCheck, + ) + p.RemoveClientFor(addr) + } else { + level.Debug(p.logger).Log( + "msg", fmt.Sprintf("%s failed healthcheck within grace period, not 
removing", p.clientName), + "addr", addr, + "reason", err, + "first_failed_at", member.firstFailedHealthCheck, + ) + } + // Never return an error, because otherwise the processing would stop and - // remaining health checks would not been executed. + // remaining health checks would not be executed. return nil }) } diff --git a/vendor/github.com/grafana/dskit/ring/lifecycler.go b/vendor/github.com/grafana/dskit/ring/lifecycler.go index 225d2f3c1977f..3b32ffb73248c 100644 --- a/vendor/github.com/grafana/dskit/ring/lifecycler.go +++ b/vendor/github.com/grafana/dskit/ring/lifecycler.go @@ -55,8 +55,8 @@ type LifecyclerConfig struct { // Injected internally ListenPort int `yaml:"-"` - // HideTokensInStatusPage allows tokens to be hidden from management tools e.g. the status page, for use in contexts which do not utilize tokens. - HideTokensInStatusPage bool `yaml:"-"` + + StatusPageConfig StatusPageConfig `yaml:"-"` // If set, specifies the TokenGenerator implementation that will be used for generating tokens. // Default value is nil, which means that RandomTokenGenerator is used. @@ -81,8 +81,8 @@ func (cfg *LifecyclerConfig) RegisterFlagsWithPrefix(prefix string, f *flag.Flag } f.IntVar(&cfg.NumTokens, prefix+"num-tokens", 128, "Number of tokens for each ingester.") - f.DurationVar(&cfg.HeartbeatPeriod, prefix+"heartbeat-period", 5*time.Second, "Period at which to heartbeat to consul. 0 = disabled.") - f.DurationVar(&cfg.HeartbeatTimeout, prefix+"heartbeat-timeout", 1*time.Minute, "Heartbeat timeout after which instance is assumed to be unhealthy. 0 = disabled.") + f.DurationVar(&cfg.HeartbeatPeriod, prefix+"heartbeat-period", 5*time.Second, "Period at which to heartbeat to consul.") + f.DurationVar(&cfg.HeartbeatTimeout, prefix+"heartbeat-timeout", 1*time.Minute, "Heartbeat timeout after which instance is assumed to be unhealthy.") f.DurationVar(&cfg.JoinAfter, prefix+"join-after", 0*time.Second, "Period to wait for a claim from another member; will join automatically after this.") f.DurationVar(&cfg.ObservePeriod, prefix+"observe-period", 0*time.Second, "Observe tokens after generating to resolve collisions. Useful when using gossiping ring.") f.DurationVar(&cfg.MinReadyDuration, prefix+"min-ready-duration", 15*time.Second, "Minimum duration to wait after the internal readiness checks have passed but before succeeding the readiness endpoint. This is used to slowdown deployment controllers (eg. Kubernetes) after an instance is ready and before they proceed with a rolling update, to give the rest of the cluster instances enough time to receive ring updates.") @@ -115,6 +115,12 @@ func (cfg *LifecyclerConfig) Validate() error { return errors.New("you can't configure the tokens file path when using the spread minimizing token strategy. 
Please set the tokens file path to an empty string") } } + if cfg.HeartbeatPeriod == 0 { + return errors.New("heartbeat period must be greater than 0") + } + if cfg.HeartbeatTimeout == 0 { + return errors.New("heartbeat timeout must be greater than 0") + } return nil } @@ -705,7 +711,7 @@ func (i *Lifecycler) initRing(ctx context.Context) error { i.setState(ACTIVE) } ro, rots := i.GetReadOnlyState() - ringDesc.AddIngester(i.ID, i.Addr, i.Zone, tokensFromFile, i.GetState(), i.getRegisteredAt(), ro, rots) + ringDesc.AddIngester(i.ID, i.Addr, i.Zone, tokensFromFile, i.GetState(), i.getRegisteredAt(), ro, rots, nil) i.setTokens(tokensFromFile) return ringDesc, true, nil } @@ -713,7 +719,7 @@ func (i *Lifecycler) initRing(ctx context.Context) error { // Either we are a new ingester, or consul must have restarted level.Info(i.logger).Log("msg", "instance not found in ring, adding with no tokens", "ring", i.RingName) ro, rots := i.GetReadOnlyState() - ringDesc.AddIngester(i.ID, i.Addr, i.Zone, []uint32{}, i.GetState(), i.getRegisteredAt(), ro, rots) + ringDesc.AddIngester(i.ID, i.Addr, i.Zone, []uint32{}, i.GetState(), i.getRegisteredAt(), ro, rots, nil) return ringDesc, true, nil } @@ -817,7 +823,7 @@ func (i *Lifecycler) verifyTokens(ctx context.Context) bool { sort.Sort(ringTokens) ro, rots := i.GetReadOnlyState() - ringDesc.AddIngester(i.ID, i.Addr, i.Zone, ringTokens, i.GetState(), i.getRegisteredAt(), ro, rots) + ringDesc.AddIngester(i.ID, i.Addr, i.Zone, ringTokens, i.GetState(), i.getRegisteredAt(), ro, rots, nil) i.setTokens(ringTokens) @@ -926,7 +932,7 @@ func (i *Lifecycler) autoJoin(ctx context.Context, targetState InstanceState) er i.setTokens(myTokens) ro, rots := i.GetReadOnlyState() - ringDesc.AddIngester(i.ID, i.Addr, i.Zone, i.getTokens(), i.GetState(), i.getRegisteredAt(), ro, rots) + ringDesc.AddIngester(i.ID, i.Addr, i.Zone, i.getTokens(), i.GetState(), i.getRegisteredAt(), ro, rots, nil) return ringDesc, true, nil }) @@ -961,7 +967,7 @@ func (i *Lifecycler) updateConsul(ctx context.Context) error { } ro, rots := i.GetReadOnlyState() - ringDesc.AddIngester(i.ID, i.Addr, i.Zone, tokens, i.GetState(), i.getRegisteredAt(), ro, rots) + ringDesc.AddIngester(i.ID, i.Addr, i.Zone, tokens, i.GetState(), i.getRegisteredAt(), ro, rots, nil) return ringDesc, true, nil }) @@ -1108,7 +1114,7 @@ func (i *Lifecycler) getRing(ctx context.Context) (*Desc, error) { } func (i *Lifecycler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - newRingPageHandler(i, i.cfg.HeartbeatTimeout, i.cfg.HideTokensInStatusPage).handle(w, req) + newRingPageHandler(i, i.cfg.HeartbeatTimeout, i.cfg.StatusPageConfig).handle(w, req) } // unregister removes our entry from consul. diff --git a/vendor/github.com/grafana/dskit/ring/model.go b/vendor/github.com/grafana/dskit/ring/model.go index 32529b6bab519..f547004cf0763 100644 --- a/vendor/github.com/grafana/dskit/ring/model.go +++ b/vendor/github.com/grafana/dskit/ring/model.go @@ -28,6 +28,8 @@ func (ts ByID) Len() int { return len(ts) } func (ts ByID) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] } func (ts ByID) Less(i, j int) bool { return ts[i].Id < ts[j].Id } +type InstanceVersions map[uint64]uint64 + // ProtoDescFactory makes new Descs func ProtoDescFactory() proto.Message { return NewDesc() @@ -54,7 +56,7 @@ func timeToUnixSecons(t time.Time) int64 { // AddIngester adds the given ingester to the ring. Ingester will only use supplied tokens, // any other tokens are removed. 
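The two new Validate checks above change the meaning of 0 for the heartbeat settings: it used to mean "disabled" (as the old flag help said), and such configurations now fail validation up front. A sketch of the failure mode, assuming the remaining LifecyclerConfig fields can stay at their zero values for this check:

package main

import (
	"fmt"
	"time"

	"github.com/grafana/dskit/ring"
)

func main() {
	cfg := ring.LifecyclerConfig{
		HeartbeatTimeout: time.Minute,
		// HeartbeatPeriod left at its zero value, which previously meant "disabled".
	}
	if err := cfg.Validate(); err != nil {
		fmt.Println(err) // heartbeat period must be greater than 0
	}
}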
-func (d *Desc) AddIngester(id, addr, zone string, tokens []uint32, state InstanceState, registeredAt time.Time, readOnly bool, readOnlyUpdated time.Time) InstanceDesc { +func (d *Desc) AddIngester(id, addr, zone string, tokens []uint32, state InstanceState, registeredAt time.Time, readOnly bool, readOnlyUpdated time.Time, versions InstanceVersions) InstanceDesc { if d.Ingesters == nil { d.Ingesters = map[string]InstanceDesc{} } @@ -69,6 +71,7 @@ func (d *Desc) AddIngester(id, addr, zone string, tokens []uint32, state Instanc RegisteredTimestamp: timeToUnixSecons(registeredAt), ReadOnly: readOnly, ReadOnlyUpdatedTimestamp: timeToUnixSecons(readOnlyUpdated), + Versions: versions, } d.Ingesters[id] = ingester @@ -177,11 +180,8 @@ func (i *InstanceDesc) IsHealthy(op Operation, heartbeatTimeout time.Duration, n } // IsHeartbeatHealthy returns whether the heartbeat timestamp for the ingester is within the -// specified timeout period. A timeout of zero disables the timeout; the heartbeat is ignored. +// specified timeout period. func (i *InstanceDesc) IsHeartbeatHealthy(heartbeatTimeout time.Duration, now time.Time) bool { - if heartbeatTimeout == 0 { - return true - } return now.Sub(time.Unix(i.Timestamp, 0)) <= heartbeatTimeout } diff --git a/vendor/github.com/grafana/dskit/ring/partition_instance_lifecycler.go b/vendor/github.com/grafana/dskit/ring/partition_instance_lifecycler.go index 9175f3607b799..ab93c4e5534e8 100644 --- a/vendor/github.com/grafana/dskit/ring/partition_instance_lifecycler.go +++ b/vendor/github.com/grafana/dskit/ring/partition_instance_lifecycler.go @@ -19,6 +19,7 @@ var ( ErrPartitionDoesNotExist = errors.New("the partition does not exist") ErrPartitionStateMismatch = errors.New("the partition state does not match the expected one") ErrPartitionStateChangeNotAllowed = errors.New("partition state change not allowed") + ErrPartitionStateChangeLocked = errors.New("partition state change is locked") allowedPartitionStateChanges = map[PartitionState][]PartitionState{ PartitionPending: {PartitionActive, PartitionInactive}, @@ -364,7 +365,7 @@ func (l *PartitionInstanceLifecycler) reconcileOwnedPartition(ctx context.Contex // have been added since more than the waiting period. if partition.IsPending() && ring.PartitionOwnersCountUpdatedBefore(partitionID, now.Add(-l.cfg.WaitOwnersDurationOnPending)) >= l.cfg.WaitOwnersCountOnPending { level.Info(l.logger).Log("msg", "switching partition state because enough owners have been registered and minimum waiting time has elapsed", "partition", l.cfg.PartitionID, "from_state", PartitionPending, "to_state", PartitionActive) - return ring.UpdatePartitionState(partitionID, PartitionActive, now), nil + return ring.UpdatePartitionState(partitionID, PartitionActive, now) } return false, nil diff --git a/vendor/github.com/grafana/dskit/ring/partition_ring_desc.pb.go b/vendor/github.com/grafana/dskit/ring/partition_ring_desc.pb.go index 8f47b1c562ea7..c89e3f359e049 100644 --- a/vendor/github.com/grafana/dskit/ring/partition_ring_desc.pb.go +++ b/vendor/github.com/grafana/dskit/ring/partition_ring_desc.pb.go @@ -159,6 +159,11 @@ type PartitionDesc struct { State PartitionState `protobuf:"varint,2,opt,name=state,proto3,enum=ring.PartitionState" json:"state,omitempty"` // Unix timestamp (with seconds precision) of when has the state changed last time for this partition. StateTimestamp int64 `protobuf:"varint,3,opt,name=stateTimestamp,proto3" json:"stateTimestamp,omitempty"` + // Whether the state can be changed. 
Set this to true to prevent any change to the state, + // unless this field gets changed back to false first. + StateChangeLocked bool `protobuf:"varint,5,opt,name=stateChangeLocked,proto3" json:"stateChangeLocked,omitempty"` + // Unix timestamp (with seconds precision) of when has the state change lock been mutated last time for this partition. + StateChangeLockedTimestamp int64 `protobuf:"varint,6,opt,name=stateChangeLockedTimestamp,proto3" json:"stateChangeLockedTimestamp,omitempty"` } func (m *PartitionDesc) Reset() { *m = PartitionDesc{} } @@ -221,6 +226,20 @@ func (m *PartitionDesc) GetStateTimestamp() int64 { return 0 } +func (m *PartitionDesc) GetStateChangeLocked() bool { + if m != nil { + return m.StateChangeLocked + } + return false +} + +func (m *PartitionDesc) GetStateChangeLockedTimestamp() int64 { + if m != nil { + return m.StateChangeLockedTimestamp + } + return 0 +} + // OwnerDesc holds the information of a partition owner. type OwnerDesc struct { // Partition that belongs to this owner. A owner can own only 1 partition, but 1 partition can be @@ -300,39 +319,41 @@ func init() { func init() { proto.RegisterFile("partition_ring_desc.proto", fileDescriptor_4df2762174d93dc4) } var fileDescriptor_4df2762174d93dc4 = []byte{ - // 497 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x93, 0x31, 0x6f, 0xd3, 0x40, - 0x14, 0xc7, 0x7d, 0x76, 0x12, 0xa9, 0x2f, 0x34, 0x39, 0xae, 0x05, 0x99, 0x0c, 0x47, 0x14, 0x44, - 0x09, 0x91, 0x48, 0xa5, 0xc0, 0x80, 0xd8, 0x52, 0x95, 0x01, 0x24, 0x44, 0x65, 0x60, 0xae, 0x9c, - 0xf8, 0x30, 0xa7, 0x34, 0x77, 0x91, 0x7d, 0x6e, 0xd5, 0x05, 0xb1, 0x31, 0xb0, 0xf0, 0x31, 0xf8, - 0x22, 0x48, 0x1d, 0x33, 0x76, 0x42, 0xc4, 0x59, 0x18, 0xfb, 0x11, 0x90, 0xcf, 0xae, 0x63, 0xbb, - 0xea, 0x76, 0xef, 0x7f, 0xef, 0xfd, 0xfe, 0xff, 0x3b, 0x9f, 0xe1, 0xc1, 0xc2, 0x0d, 0x14, 0x57, - 0x5c, 0x8a, 0xe3, 0x80, 0x0b, 0xff, 0xd8, 0x63, 0xe1, 0x74, 0xb8, 0x08, 0xa4, 0x92, 0xa4, 0x96, - 0x08, 0x9d, 0x67, 0x3e, 0x57, 0x5f, 0xa2, 0xc9, 0x70, 0x2a, 0xe7, 0xfb, 0xbe, 0xf4, 0xe5, 0xbe, - 0xde, 0x9c, 0x44, 0x9f, 0x75, 0xa5, 0x0b, 0xbd, 0x4a, 0x87, 0x7a, 0xbf, 0x4d, 0xb8, 0x7b, 0x74, - 0x8d, 0x74, 0xb8, 0xf0, 0x0f, 0x59, 0x38, 0x25, 0xef, 0x00, 0x72, 0x9f, 0xd0, 0x46, 0x5d, 0xab, - 0xdf, 0x1c, 0x3d, 0x19, 0x26, 0xfc, 0xe1, 0x8d, 0xe6, 0x8d, 0x12, 0xbe, 0x16, 0x2a, 0x38, 0x3f, - 0xa8, 0x5d, 0xfc, 0x79, 0x68, 0x38, 0x05, 0x00, 0x19, 0x43, 0x43, 0x9e, 0x09, 0x16, 0x84, 0xb6, - 0xa9, 0x51, 0x8f, 0x6e, 0x43, 0xbd, 0xd7, 0x5d, 0x45, 0x4c, 0x36, 0xd8, 0x71, 0xa0, 0x5d, 0xf1, - 0x21, 0x18, 0xac, 0x19, 0x3b, 0xb7, 0x51, 0x17, 0xf5, 0xeb, 0x4e, 0xb2, 0x24, 0x4f, 0xa1, 0x7e, - 0xea, 0x9e, 0x44, 0xcc, 0x36, 0xbb, 0xa8, 0xdf, 0x1c, 0xed, 0x54, 0x6c, 0x12, 0x0b, 0x27, 0xed, - 0x78, 0x65, 0xbe, 0x44, 0x9d, 0xb7, 0xd0, 0x2c, 0x18, 0x16, 0x79, 0x5b, 0x29, 0xef, 0x71, 0x99, - 0xd7, 0x4e, 0x79, 0x7a, 0xa6, 0xc2, 0xea, 0xfd, 0x40, 0xb0, 0x5d, 0x32, 0x22, 0x2d, 0x30, 0xb9, - 0x67, 0xd7, 0x74, 0x3a, 0x93, 0x7b, 0xe4, 0x3e, 0x34, 0x94, 0x9c, 0xb1, 0xec, 0x3e, 0xb7, 0x9d, - 0xac, 0x22, 0x03, 0xa8, 0x87, 0xca, 0x55, 0xa9, 0x49, 0x6b, 0xb4, 0x5b, 0x09, 0xfd, 0x21, 0xd9, - 0x73, 0xd2, 0x16, 0xb2, 0x07, 0x2d, 0xbd, 0xf8, 0xc8, 0xe7, 0x2c, 0x54, 0xee, 0x7c, 0x61, 0x5b, - 0x5d, 0xd4, 0xb7, 0x9c, 0x8a, 0xda, 0xfb, 0x8e, 0x60, 0x2b, 0x8f, 0x99, 0x4c, 0x25, 0xb7, 0xe8, - 0xe5, 0xcc, 0xec, 0xce, 0x2a, 0x2a, 0xd9, 0x2b, 0x27, 0xc1, 0x85, 0xe3, 0x96, 0x52, 0x0c, 0x00, - 0x47, 0x0b, 0xcf, 0x55, 0xcc, 0xab, 0xe6, 0xb8, 0xa1, 0x0f, 0xbe, 0x42, 0xab, 0x7c, 0x14, 
0xb2, - 0x0b, 0x38, 0x57, 0x3e, 0x89, 0x99, 0x90, 0x67, 0x02, 0x1b, 0x25, 0xf5, 0x88, 0x09, 0x8f, 0x0b, - 0x1f, 0x23, 0xb2, 0x53, 0xf8, 0xea, 0xe3, 0xa9, 0xe2, 0xa7, 0x0c, 0x9b, 0xe4, 0x5e, 0xe1, 0xc5, - 0xbe, 0x11, 0x6e, 0x2a, 0x5b, 0x25, 0xc2, 0x21, 0x3b, 0x61, 0x8a, 0x79, 0xb8, 0x36, 0x18, 0x03, - 0x6c, 0x0e, 0x40, 0x30, 0xdc, 0xd1, 0xd5, 0xc6, 0xb7, 0x9d, 0xbd, 0x81, 0x8c, 0x8e, 0xf2, 0x96, - 0x6b, 0x84, 0x79, 0xf0, 0x62, 0xb9, 0xa2, 0xc6, 0xe5, 0x8a, 0x1a, 0x57, 0x2b, 0x8a, 0xbe, 0xc5, - 0x14, 0xfd, 0x8a, 0x29, 0xba, 0x88, 0x29, 0x5a, 0xc6, 0x14, 0xfd, 0x8d, 0x29, 0xfa, 0x17, 0x53, - 0xe3, 0x2a, 0xa6, 0xe8, 0xe7, 0x9a, 0x1a, 0xcb, 0x35, 0x35, 0x2e, 0xd7, 0xd4, 0x98, 0x34, 0xf4, - 0xff, 0xf5, 0xfc, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc9, 0xd1, 0xa7, 0xbd, 0xb1, 0x03, 0x00, - 0x00, + // 535 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x93, 0x3f, 0x6f, 0xd3, 0x4e, + 0x18, 0xc7, 0x7d, 0xce, 0x1f, 0xfd, 0xfa, 0xe4, 0xd7, 0xc4, 0xbd, 0x16, 0x64, 0x32, 0x1c, 0x51, + 0x10, 0xc5, 0x44, 0x90, 0x4a, 0x81, 0x01, 0x31, 0x20, 0xa5, 0x94, 0x01, 0x04, 0xa2, 0x32, 0x30, + 0x57, 0x8e, 0x7d, 0xb8, 0xa7, 0x34, 0x77, 0x91, 0x7d, 0x69, 0xd5, 0x05, 0xb1, 0xb1, 0xf2, 0x32, + 0x78, 0x23, 0x48, 0x1d, 0x33, 0x76, 0x42, 0xc4, 0x59, 0x18, 0xbb, 0xb2, 0x21, 0x9f, 0x5d, 0xc7, + 0x76, 0x04, 0xdb, 0x3d, 0xdf, 0x7b, 0xee, 0xf3, 0x7d, 0x9e, 0xe7, 0xee, 0xe0, 0xd6, 0xd4, 0x09, + 0x24, 0x93, 0x4c, 0xf0, 0xa3, 0x80, 0x71, 0xff, 0xc8, 0xa3, 0xa1, 0xdb, 0x9f, 0x06, 0x42, 0x0a, + 0x5c, 0x8d, 0x85, 0xf6, 0x43, 0x9f, 0xc9, 0xe3, 0xd9, 0xa8, 0xef, 0x8a, 0xc9, 0x9e, 0x2f, 0x7c, + 0xb1, 0xa7, 0x36, 0x47, 0xb3, 0x8f, 0x2a, 0x52, 0x81, 0x5a, 0x25, 0x87, 0xba, 0xdf, 0x75, 0xd8, + 0x3a, 0xbc, 0x46, 0xda, 0x8c, 0xfb, 0x07, 0x34, 0x74, 0xf1, 0x1b, 0x80, 0xcc, 0x27, 0x34, 0x51, + 0xa7, 0x62, 0x35, 0x06, 0xf7, 0xfa, 0x31, 0xbf, 0xbf, 0x96, 0xbc, 0x52, 0xc2, 0x17, 0x5c, 0x06, + 0xe7, 0xfb, 0xd5, 0x8b, 0x1f, 0xb7, 0x35, 0x3b, 0x07, 0xc0, 0x43, 0xa8, 0x8b, 0x33, 0x4e, 0x83, + 0xd0, 0xd4, 0x15, 0xea, 0xce, 0xdf, 0x50, 0x6f, 0x55, 0x56, 0x1e, 0x93, 0x1e, 0x6c, 0xdb, 0xd0, + 0x2a, 0xf9, 0x60, 0x03, 0x2a, 0x63, 0x7a, 0x6e, 0xa2, 0x0e, 0xb2, 0x6a, 0x76, 0xbc, 0xc4, 0xf7, + 0xa1, 0x76, 0xea, 0x9c, 0xcc, 0xa8, 0xa9, 0x77, 0x90, 0xd5, 0x18, 0x6c, 0x97, 0x6c, 0x62, 0x0b, + 0x3b, 0xc9, 0x78, 0xaa, 0x3f, 0x41, 0xed, 0x57, 0xd0, 0xc8, 0x19, 0xe6, 0x79, 0x1b, 0x09, 0xef, + 0x6e, 0x91, 0xd7, 0x4a, 0x78, 0xea, 0x4c, 0x89, 0xd5, 0xfd, 0x8d, 0x60, 0xb3, 0x60, 0x84, 0x9b, + 0xa0, 0x33, 0xcf, 0xac, 0xaa, 0xea, 0x74, 0xe6, 0xe1, 0x9b, 0x50, 0x97, 0x62, 0x4c, 0xd3, 0x79, + 0x6e, 0xda, 0x69, 0x84, 0x7b, 0x50, 0x0b, 0xa5, 0x23, 0x13, 0x93, 0xe6, 0x60, 0xa7, 0x54, 0xf4, + 0xbb, 0x78, 0xcf, 0x4e, 0x52, 0xf0, 0x2e, 0x34, 0xd5, 0xe2, 0x3d, 0x9b, 0xd0, 0x50, 0x3a, 0x93, + 0xa9, 0x59, 0xe9, 0x20, 0xab, 0x62, 0x97, 0x54, 0xfc, 0x00, 0xb6, 0x94, 0xf2, 0xfc, 0xd8, 0xe1, + 0x3e, 0x7d, 0x2d, 0xdc, 0x31, 0xf5, 0xcc, 0x5a, 0x07, 0x59, 0xff, 0xd9, 0xeb, 0x1b, 0xf8, 0x19, + 0xb4, 0xd7, 0xc4, 0x95, 0x43, 0x5d, 0x39, 0xfc, 0x23, 0xa3, 0xfb, 0x05, 0xc1, 0x46, 0x36, 0x94, + 0xb8, 0xc6, 0xf8, 0xce, 0xbc, 0xac, 0x83, 0xf4, 0x86, 0x4a, 0x2a, 0xde, 0x2d, 0xf6, 0x6d, 0xe4, + 0x86, 0x5b, 0xe8, 0xb9, 0x07, 0xc6, 0x6c, 0xea, 0x39, 0x32, 0x5f, 0x53, 0xd2, 0xf5, 0x9a, 0xde, + 0xfb, 0x04, 0xcd, 0xe2, 0xe0, 0xf0, 0x0e, 0x18, 0x99, 0xf2, 0x81, 0x8f, 0xb9, 0x38, 0xe3, 0x86, + 0x56, 0x50, 0x0f, 0x29, 0xf7, 0x18, 0xf7, 0x0d, 0x84, 0xb7, 0x73, 0x6f, 0x6c, 0xe8, 0x4a, 0x76, + 0x4a, 0x0d, 0x1d, 0xdf, 0xc8, 0xfd, 0x8f, 0x97, 0xdc, 0x49, 
0xe4, 0x4a, 0x81, 0x70, 0x40, 0x4f, + 0xa8, 0xa4, 0x9e, 0x51, 0xed, 0x0d, 0x01, 0x56, 0x0d, 0x60, 0x03, 0xfe, 0x57, 0xd1, 0xca, 0xb7, + 0x95, 0xbe, 0xb8, 0x94, 0x8e, 0xb2, 0x94, 0x6b, 0x84, 0xbe, 0xff, 0x78, 0xbe, 0x20, 0xda, 0xe5, + 0x82, 0x68, 0x57, 0x0b, 0x82, 0x3e, 0x47, 0x04, 0x7d, 0x8b, 0x08, 0xba, 0x88, 0x08, 0x9a, 0x47, + 0x04, 0xfd, 0x8c, 0x08, 0xfa, 0x15, 0x11, 0xed, 0x2a, 0x22, 0xe8, 0xeb, 0x92, 0x68, 0xf3, 0x25, + 0xd1, 0x2e, 0x97, 0x44, 0x1b, 0xd5, 0xd5, 0x6f, 0x7e, 0xf4, 0x27, 0x00, 0x00, 0xff, 0xff, 0xd8, + 0xc0, 0x88, 0x8e, 0x1f, 0x04, 0x00, 0x00, } func (x PartitionState) String() string { @@ -426,6 +447,12 @@ func (this *PartitionDesc) Equal(that interface{}) bool { if this.StateTimestamp != that1.StateTimestamp { return false } + if this.StateChangeLocked != that1.StateChangeLocked { + return false + } + if this.StateChangeLockedTimestamp != that1.StateChangeLockedTimestamp { + return false + } return true } func (this *OwnerDesc) Equal(that interface{}) bool { @@ -497,12 +524,14 @@ func (this *PartitionDesc) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 8) + s := make([]string, 0, 10) s = append(s, "&ring.PartitionDesc{") s = append(s, "Id: "+fmt.Sprintf("%#v", this.Id)+",\n") s = append(s, "Tokens: "+fmt.Sprintf("%#v", this.Tokens)+",\n") s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") s = append(s, "StateTimestamp: "+fmt.Sprintf("%#v", this.StateTimestamp)+",\n") + s = append(s, "StateChangeLocked: "+fmt.Sprintf("%#v", this.StateChangeLocked)+",\n") + s = append(s, "StateChangeLockedTimestamp: "+fmt.Sprintf("%#v", this.StateChangeLockedTimestamp)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -615,6 +644,21 @@ func (m *PartitionDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.StateChangeLockedTimestamp != 0 { + i = encodeVarintPartitionRingDesc(dAtA, i, uint64(m.StateChangeLockedTimestamp)) + i-- + dAtA[i] = 0x30 + } + if m.StateChangeLocked { + i-- + if m.StateChangeLocked { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } if m.Id != 0 { i = encodeVarintPartitionRingDesc(dAtA, i, uint64(m.Id)) i-- @@ -749,6 +793,12 @@ func (m *PartitionDesc) Size() (n int) { if m.Id != 0 { n += 1 + sovPartitionRingDesc(uint64(m.Id)) } + if m.StateChangeLocked { + n += 2 + } + if m.StateChangeLockedTimestamp != 0 { + n += 1 + sovPartitionRingDesc(uint64(m.StateChangeLockedTimestamp)) + } return n } @@ -816,6 +866,8 @@ func (this *PartitionDesc) String() string { `State:` + fmt.Sprintf("%v", this.State) + `,`, `StateTimestamp:` + fmt.Sprintf("%v", this.StateTimestamp) + `,`, `Id:` + fmt.Sprintf("%v", this.Id) + `,`, + `StateChangeLocked:` + fmt.Sprintf("%v", this.StateChangeLocked) + `,`, + `StateChangeLockedTimestamp:` + fmt.Sprintf("%v", this.StateChangeLockedTimestamp) + `,`, `}`, }, "") return s @@ -1299,6 +1351,45 @@ func (m *PartitionDesc) Unmarshal(dAtA []byte) error { break } } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StateChangeLocked", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StateChangeLocked = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StateChangeLockedTimestamp", wireType) + } + m.StateChangeLockedTimestamp = 0 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StateChangeLockedTimestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipPartitionRingDesc(dAtA[iNdEx:]) diff --git a/vendor/github.com/grafana/dskit/ring/partition_ring_desc.proto b/vendor/github.com/grafana/dskit/ring/partition_ring_desc.proto index d8fb9316f01db..ea40767f00bb1 100644 --- a/vendor/github.com/grafana/dskit/ring/partition_ring_desc.proto +++ b/vendor/github.com/grafana/dskit/ring/partition_ring_desc.proto @@ -30,6 +30,13 @@ message PartitionDesc { // Unix timestamp (with seconds precision) of when has the state changed last time for this partition. int64 stateTimestamp = 3; + + // Whether the state can be changed. Set this to true to prevent any change to the state, + // unless this field gets changed back to false first. + bool stateChangeLocked = 5; + + // Unix timestamp (with seconds precision) of when has the state change lock been mutated last time for this partition. + int64 stateChangeLockedTimestamp = 6; } enum PartitionState { diff --git a/vendor/github.com/grafana/dskit/ring/partition_ring_editor.go b/vendor/github.com/grafana/dskit/ring/partition_ring_editor.go index b49761658dc41..7b68231e87480 100644 --- a/vendor/github.com/grafana/dskit/ring/partition_ring_editor.go +++ b/vendor/github.com/grafana/dskit/ring/partition_ring_editor.go @@ -38,6 +38,12 @@ func (l *PartitionRingEditor) RemoveMultiPartitionOwner(ctx context.Context, ins }) } +func (l *PartitionRingEditor) SetPartitionStateChangeLock(ctx context.Context, partitionID int32, locked bool) error { + return l.updateRing(ctx, func(ring *PartitionRingDesc) (bool, error) { + return SetPartitionStateChangeLock(ring, partitionID, locked) + }) +} + func (l *PartitionRingEditor) updateRing(ctx context.Context, update func(ring *PartitionRingDesc) (bool, error)) error { return l.store.CAS(ctx, l.ringKey, func(in interface{}) (out interface{}, retry bool, err error) { ringDesc := GetOrCreatePartitionRingDesc(in) @@ -66,5 +72,18 @@ func changePartitionState(ring *PartitionRingDesc, partitionID int32, toState Pa return false, errors.Wrapf(ErrPartitionStateChangeNotAllowed, "change partition state from %s to %s", partition.State.CleanName(), toState.CleanName()) } - return ring.UpdatePartitionState(partitionID, toState, time.Now()), nil + return ring.UpdatePartitionState(partitionID, toState, time.Now()) +} + +func SetPartitionStateChangeLock(ring *PartitionRingDesc, partitionID int32, locked bool) (changed bool, _ error) { + partition, exists := ring.Partitions[partitionID] + if !exists { + return false, ErrPartitionDoesNotExist + } + + if partition.StateChangeLocked == locked { + return false, nil + } + + return ring.UpdatePartitionStateChangeLock(partitionID, locked, time.Now()), nil } diff --git a/vendor/github.com/grafana/dskit/ring/partition_ring_http.go b/vendor/github.com/grafana/dskit/ring/partition_ring_http.go index 698f33b0f775b..cd865bdcbde9d 100644 --- a/vendor/github.com/grafana/dskit/ring/partition_ring_http.go +++ b/vendor/github.com/grafana/dskit/ring/partition_ring_http.go @@ -29,6 +29,7 @@ var partitionRingPageTemplate = template.Must(template.New("webpage").Funcs(temp type PartitionRingUpdater interface { ChangePartitionState(ctx context.Context, partitionID int32, toState PartitionState) error + SetPartitionStateChangeLock(ctx context.Context, partitionID int32, locked 
bool) error } type PartitionRingPageHandler struct { @@ -68,14 +69,15 @@ func (h *PartitionRingPageHandler) handleGetRequest(w http.ResponseWriter, req * slices.Sort(owners) partitionsByID[id] = partitionPageData{ - ID: id, - Corrupted: false, - State: partition.State, - StateTimestamp: partition.GetStateTime(), - OwnerIDs: owners, - Tokens: partition.Tokens, - NumTokens: len(partition.Tokens), - Ownership: distancePercentage(ownedTokens[id]), + ID: id, + Corrupted: false, + State: partition.State, + StateTimestamp: partition.GetStateTime(), + StateChangeLocked: partition.StateChangeLocked, + OwnerIDs: owners, + Tokens: partition.Tokens, + NumTokens: len(partition.Tokens), + Ownership: distancePercentage(ownedTokens[id]), } } @@ -147,6 +149,50 @@ func (h *PartitionRingPageHandler) handlePostRequest(w http.ResponseWriter, req http.Error(w, fmt.Sprintf("failed to change partition state: %s", err.Error()), http.StatusBadRequest) return } + } else if req.FormValue("action") == "change_state_and_lock" { + // NOTE: To avoid playing Whac-a-Mole with rollout-operator (or other actors) reverting the state to active BEFORE the operator + // is able to lock the partition state change, we offer this method which attempts to change the state and lock immediately. + // This currently contains a race. But since usually this endpoint is served by many replicas, fixing the race would require + // some work. We believe it's not worth it to add the additional complexity and we rely on the user using this to ensure + // that the state change is locked in the desired state. + partitionID, err := strconv.Atoi(req.FormValue("partition_id")) + if err != nil { + http.Error(w, fmt.Sprintf("invalid partition ID: %s", err.Error()), http.StatusBadRequest) + return + } + + toState, ok := PartitionState_value[req.FormValue("partition_state")] + if !ok { + http.Error(w, "invalid partition state", http.StatusBadRequest) + return + } + + if err := h.updater.ChangePartitionState(req.Context(), int32(partitionID), PartitionState(toState)); err != nil { + http.Error(w, fmt.Sprintf("failed to change partition state: %s", err.Error()), http.StatusBadRequest) + return + } + + if err := h.updater.SetPartitionStateChangeLock(req.Context(), int32(partitionID), true); err != nil { + http.Error(w, fmt.Sprintf("failed to lock partition state change: %s", err.Error()), http.StatusBadRequest) + return + } + } else if req.FormValue("action") == "state_change_lock" { + partitionID, err := strconv.Atoi(req.FormValue("partition_id")) + if err != nil { + http.Error(w, fmt.Sprintf("invalid partition ID: %s", err.Error()), http.StatusBadRequest) + return + } + + locked, err := strconv.ParseBool(req.FormValue("locked")) + if err != nil { + http.Error(w, fmt.Sprintf("invalid locked value: %s", err.Error()), http.StatusBadRequest) + return + } + + if err := h.updater.SetPartitionStateChangeLock(req.Context(), int32(partitionID), locked); err != nil { + http.Error(w, fmt.Sprintf("failed to lock partition state change: %s", err.Error()), http.StatusBadRequest) + return + } } // Implement PRG pattern to prevent double-POST and work with CSRF middleware. 
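All three handlePostRequest branches ("change_state", "change_state_and_lock", "state_change_lock") are plain form POSTs, which makes them easy to drive from operator tooling. A hedged sketch: the host and the /partition-ring path are placeholders for wherever the PartitionRingPageHandler is mounted, while the form field names come straight from the handler above:

package main

import (
	"log"
	"net/http"
	"net/url"
)

func main() {
	// Lock partition 1 so other actors cannot flip its state back.
	resp, err := http.PostForm("http://ingester-1:3100/partition-ring", url.Values{
		"action":       {"state_change_lock"},
		"partition_id": {"1"},
		"locked":       {"true"},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	// The handler replies with a redirect (the PRG pattern mentioned above);
	// the default client follows it, so a 200 status means the lock was accepted.
}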
@@ -164,14 +210,15 @@ type partitionRingPageData struct { } type partitionPageData struct { - ID int32 `json:"id"` - Corrupted bool `json:"corrupted"` - State PartitionState `json:"state"` - StateTimestamp time.Time `json:"state_timestamp"` - OwnerIDs []string `json:"owner_ids"` - Tokens []uint32 `json:"tokens"` - NumTokens int `json:"-"` - Ownership float64 `json:"-"` + ID int32 `json:"id"` + Corrupted bool `json:"corrupted"` + State PartitionState `json:"state"` + StateTimestamp time.Time `json:"state_timestamp"` + StateChangeLocked bool `json:"state_change_locked"` + OwnerIDs []string `json:"owner_ids"` + Tokens []uint32 `json:"tokens"` + NumTokens int `json:"-"` + Ownership float64 `json:"-"` } // distancePercentage renders a given token distance as the percentage of all possible token values covered by that distance. diff --git a/vendor/github.com/grafana/dskit/ring/partition_ring_model.go b/vendor/github.com/grafana/dskit/ring/partition_ring_model.go index cecda6b89895b..59b308770d7db 100644 --- a/vendor/github.com/grafana/dskit/ring/partition_ring_model.go +++ b/vendor/github.com/grafana/dskit/ring/partition_ring_model.go @@ -206,19 +206,41 @@ func (m *PartitionRingDesc) AddPartition(id int32, state PartitionState, now tim // UpdatePartitionState changes the state of a partition. Returns true if the state was changed, // or false if the update was a no-op. -func (m *PartitionRingDesc) UpdatePartitionState(id int32, state PartitionState, now time.Time) bool { +func (m *PartitionRingDesc) UpdatePartitionState(id int32, state PartitionState, now time.Time) (bool, error) { d, ok := m.Partitions[id] if !ok { - return false + return false, nil } if d.State == state { - return false + return false, nil + } + + if d.StateChangeLocked { + return false, ErrPartitionStateChangeLocked } d.State = state d.StateTimestamp = now.Unix() m.Partitions[id] = d + return true, nil +} + +// UpdatePartitionStateChangeLock changes the state change lock of a partition. Returns true if the lock was changed, +// or false if the update was a no-op. +func (m *PartitionRingDesc) UpdatePartitionStateChangeLock(id int32, locked bool, now time.Time) bool { + d, ok := m.Partitions[id] + if !ok { + return false + } + + if d.StateChangeLocked == locked { + return false + } + + d.StateChangeLocked = locked + d.StateChangeLockedTimestamp = now.Unix() + m.Partitions[id] = d return true } @@ -344,6 +366,13 @@ func (m *PartitionRingDesc) mergeWithTime(mergeable memberlist.Mergeable, localC thisPart.State = otherPart.State thisPart.StateTimestamp = otherPart.StateTimestamp } + + if otherPart.StateChangeLockedTimestamp > thisPart.StateChangeLockedTimestamp { + changed = true + + thisPart.StateChangeLocked = otherPart.StateChangeLocked + thisPart.StateChangeLockedTimestamp = otherPart.StateChangeLockedTimestamp + } } if changed { diff --git a/vendor/github.com/grafana/dskit/ring/partition_ring_status.gohtml b/vendor/github.com/grafana/dskit/ring/partition_ring_status.gohtml index 1f0a2eaf0203e..86ada17c66c21 100644 --- a/vendor/github.com/grafana/dskit/ring/partition_ring_status.gohtml +++ b/vendor/github.com/grafana/dskit/ring/partition_ring_status.gohtml @@ -50,12 +50,21 @@ {{ if and (not .Corrupted) (ne (index $stateChanges .State) 0) }} {{ $toState := index $stateChanges .State }} -
+ [button markup stripped in extraction: submit controls for the "change_state" and
+  new "change_state_and_lock" actions on {{ $toState }}]
+ {{ end }}
+ {{ if not .Corrupted }}
+ [button markup stripped in extraction: toggle control for the "state_change_lock" action]
{{ end }} diff --git a/vendor/github.com/grafana/dskit/ring/partition_ring_watcher.go b/vendor/github.com/grafana/dskit/ring/partition_ring_watcher.go index 2b9d47736833c..9f0049ba80527 100644 --- a/vendor/github.com/grafana/dskit/ring/partition_ring_watcher.go +++ b/vendor/github.com/grafana/dskit/ring/partition_ring_watcher.go @@ -2,6 +2,7 @@ package ring import ( "context" + "strconv" "sync" "github.com/go-kit/log" @@ -106,6 +107,15 @@ func (w *PartitionRingWatcher) updatePartitionRing(desc *PartitionRingDesc) { for state, count := range desc.countPartitionsByState() { w.numPartitionsGaugeVec.WithLabelValues(state.CleanName()).Set(float64(count)) } + + // Check partitions whose state change is locked and log them. + for partitionID, partition := range desc.Partitions { + state := partition.GetState().CleanName() + partitionIDStr := strconv.Itoa(int(partitionID)) + if partition.StateChangeLocked { + level.Warn(w.logger).Log("msg", "partition state change is locked", "partition_id", partitionIDStr, "partition_state", state) + } + } } // PartitionRing returns the most updated snapshot of the PartitionRing. The returned instance diff --git a/vendor/github.com/grafana/dskit/ring/ring.go b/vendor/github.com/grafana/dskit/ring/ring.go index 732bba73459c6..26594ae82773e 100644 --- a/vendor/github.com/grafana/dskit/ring/ring.go +++ b/vendor/github.com/grafana/dskit/ring/ring.go @@ -10,6 +10,7 @@ import ( "math/rand" "net/http" "slices" + "strings" "sync" "time" @@ -32,6 +33,8 @@ const ( // GetBufferSize is the suggested size of buffers passed to Ring.Get(). It's based on // a typical replication factor 3, plus extra room for a JOINING + LEAVING instance. GetBufferSize = 5 + + maxZonesForMetrics = 32 ) // Options are the result of Option instances that can be used to modify Ring.GetWithOptions behavior. @@ -196,9 +199,8 @@ type Config struct { // Whether the shuffle-sharding subring cache is disabled. This option is set // internally and never exposed to the user. SubringCacheDisabled bool `yaml:"-"` - // HideTokensInStatusPage allows tokens to be hidden from management tools e.g. the status page, for use in contexts which do not utilize tokens. - // This option is set internally and never exposed to the user. - HideTokensInStatusPage bool `yaml:"-"` + + StatusPageConfig StatusPageConfig `yaml:"-"` } // RegisterFlags adds the flags required to config this to the given FlagSet with a specified prefix @@ -210,12 +212,20 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { cfg.KVStore.RegisterFlagsWithPrefix(prefix, "collectors/", f) - f.DurationVar(&cfg.HeartbeatTimeout, prefix+"ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which ingesters are skipped for reads/writes. 0 = never (timeout disabled).") + f.DurationVar(&cfg.HeartbeatTimeout, prefix+"ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which ingesters are skipped for reads/writes.") f.IntVar(&cfg.ReplicationFactor, prefix+"distributor.replication-factor", 3, "The number of ingesters to write to and read from.") f.BoolVar(&cfg.ZoneAwarenessEnabled, prefix+"distributor.zone-awareness-enabled", false, "True to enable the zone-awareness and replicate ingested samples across different availability zones.") f.Var(&cfg.ExcludedZones, prefix+"distributor.excluded-zones", "Comma-separated list of zones to exclude from the ring. 
Instances in excluded zones will be filtered out from the ring.") } +// Validate checks the consistency of Config, and fails if this cannot be achieved. +func (cfg *Config) Validate() error { + if cfg.HeartbeatTimeout == 0 { + return errors.New("heartbeat timeout must be greater than 0") + } + return nil +} + type instanceInfo struct { InstanceID string Zone string @@ -258,6 +268,10 @@ type Ring struct { // to be sorted alphabetically. ringZones []string + // Map containing all ring zones ever discovered, even if they have no instances, + // capped at 32 zones to limit cardinality. + trackedRingZones map[string]struct{} + // Number of registered instances with tokens. instancesWithTokensCount int @@ -279,6 +293,7 @@ type Ring struct { shuffledSubringWithLookbackCache map[subringCacheKey]cachedSubringWithLookback[*Ring] numMembersGaugeVec *prometheus.GaugeVec + numZoneMembersGaugeVec *prometheus.GaugeVec totalTokensGauge prometheus.Gauge oldestTimestampGaugeVec *prometheus.GaugeVec @@ -325,6 +340,7 @@ func NewWithStoreClientAndStrategy(cfg Config, name, key string, store kv.Client KVClient: store, strategy: strategy, ringDesc: &Desc{}, + trackedRingZones: map[string]struct{}{}, shuffledSubringCache: map[subringCacheKey]*Ring{}, shuffledSubringWithLookbackCache: map[subringCacheKey]cachedSubringWithLookback[*Ring]{}, numMembersGaugeVec: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ @@ -333,6 +349,11 @@ func NewWithStoreClientAndStrategy(cfg Config, name, key string, store kv.Client ConstLabels: map[string]string{"name": name}, }, []string{"state"}), + numZoneMembersGaugeVec: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ + Name: "ring_zone_members", + Help: "Number of ring members for each zone/state pair", + ConstLabels: map[string]string{"name": name}, + }, []string{"zone", "state"}), totalTokensGauge: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ Name: "ring_tokens_total", Help: "Number of tokens in the ring", @@ -441,7 +462,7 @@ func (r *Ring) setRingStateFromDesc(ringDesc *Desc, updateMetrics, updateRegiste r.ringTokens = ringTokens r.ringTokensByZone = ringTokensByZone r.ringInstanceByToken = ringInstanceByToken - r.ringZones = ringZones + r.updateRingZones(ringZones) r.instancesWithTokensCount = instancesWithTokensCount r.instancesCountPerZone = instancesCountPerZone r.instancesWithTokensCountPerZone = instancesWithTokensCountPerZone @@ -778,15 +799,49 @@ func (r *Desc) CountTokens() map[string]int64 { return owned } +func (r *Ring) updateRingZones(zones []string) { + r.ringZones = zones + var notAdded []string + for _, zone := range zones { + if _, ok := r.trackedRingZones[zone]; !ok { + if len(r.trackedRingZones) >= maxZonesForMetrics { + notAdded = append(notAdded, zone) + } else { + r.trackedRingZones[zone] = struct{}{} + } + } + } + if len(notAdded) > 0 { + level.Warn(r.logger).Log( + "msg", "not tracking metrics for zone(s) due to high cardinality", + "zones", strings.Join(notAdded, ","), + ) + } +} + // updateRingMetrics updates ring metrics. Caller must be holding the Write lock! func (r *Ring) updateRingMetrics() { numByState := map[string]int{} oldestTimestampByState := map[string]int64{} + // Will emit nothing if no zones were discovered. 
+ var numByZoneAndState map[string]map[string]int + if r.cfg.ZoneAwarenessEnabled { + numByZoneAndState = map[string]map[string]int{} + for zone := range r.trackedRingZones { + numByZoneAndState[zone] = map[string]int{} + } + } + // Initialized to zero so we emit zero-metrics (instead of not emitting anything) for _, s := range []string{unhealthy, ACTIVE.String(), LEAVING.String(), PENDING.String(), JOINING.String()} { numByState[s] = 0 oldestTimestampByState[s] = 0 + if r.cfg.ZoneAwarenessEnabled { + for zone := range numByZoneAndState { + numByZoneAndState[zone][s] = 0 + } + } } for _, instance := range r.ringDesc.Ingesters { @@ -798,6 +853,11 @@ func (r *Ring) updateRingMetrics() { if oldestTimestampByState[s] == 0 || instance.Timestamp < oldestTimestampByState[s] { oldestTimestampByState[s] = instance.Timestamp } + if r.cfg.ZoneAwarenessEnabled { + if byState, ok := numByZoneAndState[instance.Zone]; ok { + byState[s]++ + } + } } for state, count := range numByState { @@ -806,6 +866,13 @@ func (r *Ring) updateRingMetrics() { for state, timestamp := range oldestTimestampByState { r.oldestTimestampGaugeVec.WithLabelValues(state).Set(float64(timestamp)) } + if r.cfg.ZoneAwarenessEnabled { + for zone, byState := range numByZoneAndState { + for state, count := range byState { + r.numZoneMembersGaugeVec.WithLabelValues(zone, state).Set(float64(count)) + } + } + } r.totalTokensGauge.Set(float64(len(r.ringTokens))) } @@ -1058,14 +1125,17 @@ func (r *Ring) buildRingForTheShard(shard map[string]InstanceDesc) *Ring { shardDesc := &Desc{Ingesters: shard} shardTokensByZone := shardDesc.getTokensByZone() shardTokens := mergeTokenGroups(shardTokensByZone) + zones := getZones(shardTokensByZone) - return &Ring{ + ring := &Ring{ cfg: r.cfg, strategy: r.strategy, + logger: r.logger, ringDesc: shardDesc, ringTokens: shardTokens, ringTokensByZone: shardTokensByZone, - ringZones: getZones(shardTokensByZone), + ringZones: zones, + trackedRingZones: map[string]struct{}{}, instancesWithTokensCount: shardDesc.instancesWithTokensCount(), instancesCountPerZone: shardDesc.instancesCountPerZone(), instancesWithTokensCountPerZone: shardDesc.instancesWithTokensCountPerZone(), @@ -1082,6 +1152,9 @@ func (r *Ring) buildRingForTheShard(shard map[string]InstanceDesc) *Ring { // For caching to work, remember these values. lastTopologyChange: r.lastTopologyChange, } + + ring.updateRingZones(zones) + return ring } // mergeTokenGroups returns a sorted list of all tokens in each entry in groupsByName. @@ -1345,7 +1418,7 @@ func (r *Ring) getRing(_ context.Context) (*Desc, error) { } func (r *Ring) ServeHTTP(w http.ResponseWriter, req *http.Request) { - newRingPageHandler(r, r.cfg.HeartbeatTimeout, r.cfg.HideTokensInStatusPage).handle(w, req) + newRingPageHandler(r, r.cfg.HeartbeatTimeout, r.cfg.StatusPageConfig).handle(w, req) } // InstancesCount returns the number of instances in the ring. diff --git a/vendor/github.com/grafana/dskit/ring/ring.pb.go b/vendor/github.com/grafana/dskit/ring/ring.pb.go index f976b7e994d8c..7a435a1ef2233 100644 --- a/vendor/github.com/grafana/dskit/ring/ring.pb.go +++ b/vendor/github.com/grafana/dskit/ring/ring.pb.go @@ -137,6 +137,11 @@ type InstanceDesc struct { // Read-only instances go through standard state changes, and special handling is applied to them // during shuffle shards. ReadOnly bool `protobuf:"varint,11,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + // Map of component / feature identifiers to version number. 
+ // The component / feature identifiers are specific to the application (ie. aren't defined in dskit). + // We use a uint64 for the version number itself as versions are expected to increase and be + // sortable. + Versions map[uint64]uint64 `protobuf:"bytes,12,rep,name=versions,proto3" json:"versions,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` } func (m *InstanceDesc) Reset() { *m = InstanceDesc{} } @@ -234,47 +239,58 @@ func (m *InstanceDesc) GetReadOnly() bool { return false } +func (m *InstanceDesc) GetVersions() map[uint64]uint64 { + if m != nil { + return m.Versions + } + return nil +} + func init() { proto.RegisterEnum("ring.InstanceState", InstanceState_name, InstanceState_value) proto.RegisterType((*Desc)(nil), "ring.Desc") proto.RegisterMapType((map[string]InstanceDesc)(nil), "ring.Desc.IngestersEntry") proto.RegisterType((*InstanceDesc)(nil), "ring.InstanceDesc") + proto.RegisterMapType((map[uint64]uint64)(nil), "ring.InstanceDesc.VersionsEntry") } func init() { proto.RegisterFile("ring.proto", fileDescriptor_26381ed67e202a6e) } var fileDescriptor_26381ed67e202a6e = []byte{ - // 478 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0x31, 0x6f, 0xd3, 0x40, - 0x1c, 0xc5, 0x7d, 0xf6, 0xc5, 0xb5, 0xff, 0xa1, 0x91, 0x75, 0x45, 0xc8, 0xb4, 0xe8, 0xb0, 0x3a, - 0x19, 0x24, 0x52, 0x11, 0x18, 0x10, 0x52, 0x87, 0x96, 0x1a, 0xe4, 0x28, 0x4a, 0x2b, 0x13, 0xba, - 0x46, 0x4e, 0x7c, 0x18, 0xab, 0x89, 0x1d, 0xd9, 0x17, 0xa4, 0x30, 0xf1, 0x11, 0xf8, 0x02, 0xec, - 0x7c, 0x0e, 0xa6, 0x8e, 0x19, 0x3b, 0x21, 0xe2, 0x2c, 0x8c, 0xfd, 0x08, 0xe8, 0xce, 0x6d, 0xdd, - 0x6c, 0xef, 0xe5, 0xbd, 0xff, 0xef, 0xe5, 0x24, 0x03, 0xe4, 0x49, 0x1a, 0xb7, 0x67, 0x79, 0xc6, - 0x33, 0x82, 0x85, 0xde, 0x7d, 0x11, 0x27, 0xfc, 0xcb, 0x7c, 0xd4, 0x1e, 0x67, 0xd3, 0x83, 0x38, - 0x8b, 0xb3, 0x03, 0x19, 0x8e, 0xe6, 0x9f, 0xa5, 0x93, 0x46, 0xaa, 0xea, 0x68, 0xff, 0x27, 0x02, - 0x7c, 0xc2, 0x8a, 0x31, 0x39, 0x04, 0x33, 0x49, 0x63, 0x56, 0x70, 0x96, 0x17, 0x36, 0x72, 0x34, - 0xb7, 0xd9, 0x79, 0xdc, 0x96, 0x74, 0x11, 0xb7, 0xfd, 0xdb, 0xcc, 0x4b, 0x79, 0xbe, 0x38, 0xc6, - 0x97, 0x7f, 0x9e, 0x2a, 0x41, 0x7d, 0xb1, 0x7b, 0x06, 0xad, 0xcd, 0x0a, 0xb1, 0x40, 0xbb, 0x60, - 0x0b, 0x1b, 0x39, 0xc8, 0x35, 0x03, 0x21, 0x89, 0x0b, 0x8d, 0xaf, 0xe1, 0x64, 0xce, 0x6c, 0xd5, - 0x41, 0x6e, 0xb3, 0x43, 0x2a, 0xbc, 0x9f, 0x16, 0x3c, 0x4c, 0xc7, 0x4c, 0xcc, 0x04, 0x55, 0xe1, - 0xad, 0xfa, 0x06, 0x75, 0xb1, 0xa1, 0x5a, 0xda, 0xfe, 0x6f, 0x15, 0x1e, 0xdc, 0x6f, 0x10, 0x02, - 0x38, 0x8c, 0xa2, 0xfc, 0x86, 0x2b, 0x35, 0x79, 0x02, 0x26, 0x4f, 0xa6, 0xac, 0xe0, 0xe1, 0x74, - 0x26, 0xe1, 0x5a, 0x50, 0xff, 0x40, 0x9e, 0x41, 0xa3, 0xe0, 0x21, 0x67, 0xb6, 0xe6, 0x20, 0xb7, - 0xd5, 0xd9, 0xd9, 0x9c, 0xfd, 0x28, 0xa2, 0xa0, 0x6a, 0x90, 0x47, 0xa0, 0xf3, 0xec, 0x82, 0xa5, - 0x85, 0xad, 0x3b, 0x9a, 0xbb, 0x1d, 0xdc, 0x38, 0x31, 0xfa, 0x2d, 0x4b, 0x99, 0xbd, 0x55, 0x8d, - 0x0a, 0x4d, 0x5e, 0xc2, 0xc3, 0x9c, 0xc5, 0x89, 0x78, 0x31, 0x8b, 0x86, 0xf5, 0xbe, 0x21, 0xf7, - 0x77, 0xea, 0x6c, 0x70, 0xf7, 0x4f, 0x5a, 0xa0, 0x26, 0x91, 0x6d, 0x4a, 0x88, 0x9a, 0x44, 0xe4, - 0x10, 0xf6, 0x72, 0x16, 0x46, 0xc3, 0x2c, 0x9d, 0x2c, 0x86, 0xf3, 0x59, 0x14, 0xf2, 0x0d, 0x12, - 0x48, 0x92, 0x2d, 0x2a, 0xa7, 0xe9, 0x64, 0xf1, 0xa9, 0x2a, 0xd4, 0xb8, 0x3d, 0x30, 0xef, 0xce, - 0xed, 0xa6, 0x83, 0x5c, 0x23, 0x30, 0x6e, 0xcb, 0x5d, 0x6c, 0x60, 0xab, 0xd1, 0xc5, 0x46, 0xc3, - 0xd2, 0x9f, 0xf7, 0x60, 0x7b, 0xe3, 0xb9, 0x04, 0x40, 0x3f, 0x7a, 0x37, 0xf0, 0xcf, 0x3d, 
0x4b, - 0x21, 0x4d, 0xd8, 0xea, 0x79, 0x47, 0xe7, 0x7e, 0xff, 0x83, 0x85, 0x84, 0x39, 0xf3, 0xfa, 0x27, - 0xc2, 0xa8, 0xc2, 0x74, 0x4f, 0xfd, 0xbe, 0x30, 0x1a, 0x31, 0x00, 0xf7, 0xbc, 0xf7, 0x03, 0x0b, - 0x1f, 0xbf, 0x5e, 0xae, 0xa8, 0x72, 0xb5, 0xa2, 0xca, 0xf5, 0x8a, 0xa2, 0xef, 0x25, 0x45, 0xbf, - 0x4a, 0x8a, 0x2e, 0x4b, 0x8a, 0x96, 0x25, 0x45, 0x7f, 0x4b, 0x8a, 0xfe, 0x95, 0x54, 0xb9, 0x2e, - 0x29, 0xfa, 0xb1, 0xa6, 0xca, 0x72, 0x4d, 0x95, 0xab, 0x35, 0x55, 0x46, 0xba, 0xfc, 0xde, 0x5e, - 0xfd, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x5b, 0x75, 0x81, 0xb2, 0x02, 0x00, 0x00, + // 519 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x52, 0xc1, 0x6e, 0xd3, 0x40, + 0x14, 0xf4, 0xda, 0x9b, 0xd4, 0x79, 0x69, 0x23, 0x6b, 0x5b, 0x21, 0x93, 0xa2, 0xc5, 0xea, 0xc9, + 0x20, 0x91, 0x8a, 0xc0, 0x01, 0x01, 0x3d, 0xb4, 0x34, 0xa0, 0x44, 0x51, 0x5a, 0x99, 0x90, 0x6b, + 0xe4, 0xc4, 0x8b, 0xb1, 0x9a, 0xd8, 0x91, 0xbd, 0xa9, 0x14, 0x4e, 0x7c, 0x02, 0x3f, 0xc0, 0x9d, + 0x9f, 0xe0, 0xde, 0x63, 0x8e, 0x3d, 0x21, 0xe2, 0x5c, 0x38, 0xf6, 0x13, 0xd0, 0xae, 0xd3, 0x24, + 0x16, 0xb7, 0x37, 0x9e, 0x79, 0x33, 0xf6, 0x1b, 0x03, 0xc4, 0x41, 0xe8, 0xd7, 0x26, 0x71, 0xc4, + 0x23, 0x82, 0xc5, 0x5c, 0x7d, 0xe6, 0x07, 0xfc, 0xcb, 0x74, 0x50, 0x1b, 0x46, 0xe3, 0x63, 0x3f, + 0xf2, 0xa3, 0x63, 0x49, 0x0e, 0xa6, 0x9f, 0x25, 0x92, 0x40, 0x4e, 0xd9, 0xd2, 0xd1, 0x0f, 0x04, + 0xf8, 0x9c, 0x25, 0x43, 0x72, 0x02, 0xa5, 0x20, 0xf4, 0x59, 0xc2, 0x59, 0x9c, 0x98, 0xc8, 0xd2, + 0xec, 0x72, 0xfd, 0x61, 0x4d, 0xba, 0x0b, 0xba, 0xd6, 0xbc, 0xe7, 0x1a, 0x21, 0x8f, 0x67, 0x67, + 0xf8, 0xe6, 0xf7, 0x63, 0xc5, 0xd9, 0x6c, 0x54, 0x2f, 0xa1, 0x92, 0x97, 0x10, 0x03, 0xb4, 0x2b, + 0x36, 0x33, 0x91, 0x85, 0xec, 0x92, 0x23, 0x46, 0x62, 0x43, 0xe1, 0xda, 0x1d, 0x4d, 0x99, 0xa9, + 0x5a, 0xc8, 0x2e, 0xd7, 0x49, 0x66, 0xdf, 0x0c, 0x13, 0xee, 0x86, 0x43, 0x26, 0x62, 0x9c, 0x4c, + 0xf0, 0x5a, 0x7d, 0x85, 0x5a, 0x58, 0x57, 0x0d, 0xed, 0xe8, 0x97, 0x06, 0xbb, 0xdb, 0x0a, 0x42, + 0x00, 0xbb, 0x9e, 0x17, 0xaf, 0x7c, 0xe5, 0x4c, 0x1e, 0x41, 0x89, 0x07, 0x63, 0x96, 0x70, 0x77, + 0x3c, 0x91, 0xe6, 0x9a, 0xb3, 0x79, 0x40, 0x9e, 0x40, 0x21, 0xe1, 0x2e, 0x67, 0xa6, 0x66, 0x21, + 0xbb, 0x52, 0xdf, 0xcf, 0xc7, 0x7e, 0x14, 0x94, 0x93, 0x29, 0xc8, 0x03, 0x28, 0xf2, 0xe8, 0x8a, + 0x85, 0x89, 0x59, 0xb4, 0x34, 0x7b, 0xcf, 0x59, 0x21, 0x11, 0xfa, 0x35, 0x0a, 0x99, 0xb9, 0x93, + 0x85, 0x8a, 0x99, 0x3c, 0x87, 0x83, 0x98, 0xf9, 0x81, 0xf8, 0x62, 0xe6, 0xf5, 0x37, 0xf9, 0xba, + 0xcc, 0xdf, 0xdf, 0x70, 0xdd, 0xf5, 0x9b, 0x54, 0x40, 0x0d, 0x3c, 0xb3, 0x24, 0x4d, 0xd4, 0xc0, + 0x23, 0x27, 0x70, 0x18, 0x33, 0xd7, 0xeb, 0x47, 0xe1, 0x68, 0xd6, 0x9f, 0x4e, 0x3c, 0x97, 0xe7, + 0x9c, 0x40, 0x3a, 0x99, 0x42, 0x72, 0x11, 0x8e, 0x66, 0x9f, 0x32, 0xc1, 0xc6, 0xee, 0x10, 0x4a, + 0xeb, 0x75, 0xb3, 0x6c, 0x21, 0x5b, 0x77, 0xf4, 0x7b, 0x31, 0x79, 0x0b, 0xfa, 0x35, 0x8b, 0x93, + 0x20, 0x0a, 0x13, 0x73, 0x57, 0xd6, 0x69, 0xfd, 0x7f, 0xef, 0x5a, 0x6f, 0x25, 0x91, 0x95, 0x39, + 0xeb, 0x8d, 0xea, 0x1b, 0xd8, 0xcb, 0x51, 0xdb, 0x6d, 0xe2, 0xac, 0xcd, 0x83, 0xed, 0x36, 0x71, + 0xbe, 0x39, 0x6c, 0x14, 0x5a, 0x58, 0x2f, 0x18, 0xc5, 0xa7, 0x6d, 0xd8, 0xcb, 0x5d, 0x9a, 0x00, + 0x14, 0x4f, 0xdf, 0x75, 0x9b, 0xbd, 0x86, 0xa1, 0x90, 0x32, 0xec, 0xb4, 0x1b, 0xa7, 0xbd, 0x66, + 0xe7, 0x83, 0x81, 0x04, 0xb8, 0x6c, 0x74, 0xce, 0x05, 0x50, 0x05, 0x68, 0x5d, 0x34, 0x3b, 0x02, + 0x68, 0x44, 0x07, 0xdc, 0x6e, 0xbc, 0xef, 0x1a, 0xf8, 0xec, 0xe5, 0x7c, 0x41, 0x95, 0xdb, 0x05, + 0x55, 0xee, 0x16, 0x14, 0x7d, 0x4b, 0x29, 0xfa, 0x99, 0x52, 0x74, 0x93, 0x52, 
0x34, 0x4f, 0x29, + 0xfa, 0x93, 0x52, 0xf4, 0x37, 0xa5, 0xca, 0x5d, 0x4a, 0xd1, 0xf7, 0x25, 0x55, 0xe6, 0x4b, 0xaa, + 0xdc, 0x2e, 0xa9, 0x32, 0x28, 0xca, 0x5f, 0xfd, 0xc5, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc4, + 0x7c, 0x59, 0x32, 0x2d, 0x03, 0x00, 0x00, } func (x InstanceState) String() string { @@ -366,6 +382,14 @@ func (this *InstanceDesc) Equal(that interface{}) bool { if this.ReadOnly != that1.ReadOnly { return false } + if len(this.Versions) != len(that1.Versions) { + return false + } + for i := range this.Versions { + if this.Versions[i] != that1.Versions[i] { + return false + } + } return true } func (this *Desc) GoString() string { @@ -394,7 +418,7 @@ func (this *InstanceDesc) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 13) + s := make([]string, 0, 14) s = append(s, "&ring.InstanceDesc{") s = append(s, "Addr: "+fmt.Sprintf("%#v", this.Addr)+",\n") s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n") @@ -405,6 +429,19 @@ func (this *InstanceDesc) GoString() string { s = append(s, "Id: "+fmt.Sprintf("%#v", this.Id)+",\n") s = append(s, "ReadOnlyUpdatedTimestamp: "+fmt.Sprintf("%#v", this.ReadOnlyUpdatedTimestamp)+",\n") s = append(s, "ReadOnly: "+fmt.Sprintf("%#v", this.ReadOnly)+",\n") + keysForVersions := make([]uint64, 0, len(this.Versions)) + for k, _ := range this.Versions { + keysForVersions = append(keysForVersions, k) + } + github_com_gogo_protobuf_sortkeys.Uint64s(keysForVersions) + mapStringForVersions := "map[uint64]uint64{" + for _, k := range keysForVersions { + mapStringForVersions += fmt.Sprintf("%#v: %#v,", k, this.Versions[k]) + } + mapStringForVersions += "}" + if this.Versions != nil { + s = append(s, "Versions: "+mapStringForVersions+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -483,6 +520,21 @@ func (m *InstanceDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Versions) > 0 { + for k := range m.Versions { + v := m.Versions[k] + baseI := i + i = encodeVarintRing(dAtA, i, uint64(v)) + i-- + dAtA[i] = 0x10 + i = encodeVarintRing(dAtA, i, uint64(k)) + i-- + dAtA[i] = 0x8 + i = encodeVarintRing(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x62 + } + } if m.ReadOnly { i-- if m.ReadOnly { @@ -624,6 +676,14 @@ func (m *InstanceDesc) Size() (n int) { if m.ReadOnly { n += 2 } + if len(m.Versions) > 0 { + for k, v := range m.Versions { + _ = k + _ = v + mapEntrySize := 1 + sovRing(uint64(k)) + 1 + sovRing(uint64(v)) + n += mapEntrySize + 1 + sovRing(uint64(mapEntrySize)) + } + } return n } @@ -657,6 +717,16 @@ func (this *InstanceDesc) String() string { if this == nil { return "nil" } + keysForVersions := make([]uint64, 0, len(this.Versions)) + for k, _ := range this.Versions { + keysForVersions = append(keysForVersions, k) + } + github_com_gogo_protobuf_sortkeys.Uint64s(keysForVersions) + mapStringForVersions := "map[uint64]uint64{" + for _, k := range keysForVersions { + mapStringForVersions += fmt.Sprintf("%v: %v,", k, this.Versions[k]) + } + mapStringForVersions += "}" s := strings.Join([]string{`&InstanceDesc{`, `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`, @@ -667,6 +737,7 @@ func (this *InstanceDesc) String() string { `Id:` + fmt.Sprintf("%v", this.Id) + `,`, `ReadOnlyUpdatedTimestamp:` + fmt.Sprintf("%v", this.ReadOnlyUpdatedTimestamp) + `,`, `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Versions:` + mapStringForVersions + `,`, `}`, }, "") return s @@ -1158,6 +1229,105 @@ func (m *InstanceDesc) 
Unmarshal(dAtA []byte) error { } } m.ReadOnly = bool(v != 0) + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRing + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRing + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRing + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Versions == nil { + m.Versions = make(map[uint64]uint64) + } + var mapkey uint64 + var mapvalue uint64 + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRing + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRing + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else if fieldNum == 2 { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRing + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else { + iNdEx = entryPreIndex + skippy, err := skipRing(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRing + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Versions[mapkey] = mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRing(dAtA[iNdEx:]) diff --git a/vendor/github.com/grafana/dskit/ring/ring.proto b/vendor/github.com/grafana/dskit/ring/ring.proto index 7795e8493fc30..0dc7d8c51d289 100644 --- a/vendor/github.com/grafana/dskit/ring/ring.proto +++ b/vendor/github.com/grafana/dskit/ring/ring.proto @@ -55,6 +55,12 @@ message InstanceDesc { // Read-only instances go through standard state changes, and special handling is applied to them // during shuffle shards. bool read_only = 11; + + // Map of component / feature identifiers to version number. + // The component / feature identifiers are specific to the application (ie. aren't defined in dskit). + // We use a uint64 for the version number itself as versions are expected to increase and be + // sortable. + map versions = 12; } enum InstanceState { diff --git a/vendor/github.com/grafana/dskit/ring/ring_http.go b/vendor/github.com/grafana/dskit/ring/ring_http.go index d961d8b158341..cfd8e4433a34f 100644 --- a/vendor/github.com/grafana/dskit/ring/ring_http.go +++ b/vendor/github.com/grafana/dskit/ring/ring_http.go @@ -32,24 +32,41 @@ var defaultPageTemplate = template.Must(template.New("webpage").Funcs(template.F type httpResponse struct { Ingesters []ingesterDesc `json:"shards"` Now time.Time `json:"now"` - // ShowTokens indicates whether the Show Tokens button is clicked. - ShowTokens bool `json:"-"` - // DisableTokens hides the concept of tokens entirely in the page, across all elements. - DisableTokens bool `json:"-"` + // TokensToggledOn indicates whether the Show Tokens button is clicked. 
+ TokensToggledOn bool `json:"-"` + Config StatusPageConfig `json:"-"` } type ingesterDesc struct { - ID string `json:"id"` - State string `json:"state"` - Address string `json:"address"` - HeartbeatTimestamp time.Time `json:"timestamp"` - RegisteredTimestamp time.Time `json:"registered_timestamp"` - ReadOnly bool `json:"read_only"` - ReadOnlyUpdatedTimestamp time.Time `json:"read_only_updated_timestamp"` - Zone string `json:"zone"` - Tokens []uint32 `json:"tokens"` - NumTokens int `json:"-"` - Ownership float64 `json:"-"` + ID string `json:"id"` + State string `json:"state"` + Address string `json:"address"` + HeartbeatTimestamp time.Time `json:"timestamp"` + RegisteredTimestamp time.Time `json:"registered_timestamp"` + ReadOnly bool `json:"read_only"` + ReadOnlyUpdatedTimestamp time.Time `json:"read_only_updated_timestamp"` + Zone string `json:"zone"` + Tokens []uint32 `json:"tokens"` + NumTokens int `json:"-"` + Ownership float64 `json:"-"` + Versions map[uint64]uint64 `json:"versions,omitempty"` +} + +// StatusPageConfig configures the ring status page. +// +// Given the values are only used to configure the status page, it is safe to change them between +// releases or have different values on different members of the same ring. +type StatusPageConfig struct { + // HideTokensUIElements allows tokens to be hidden from the status page, for use in contexts which do not utilize tokens. + HideTokensUIElements bool `yaml:"-" json:"-"` + + // ShowVersions enables displaying versions on the status page. + ShowVersions bool `yaml:"-" json:"-"` + + // ComponentNames are the names of the components in InstanceDesc.Versions, used only for display on the status page. + // If a component in Versions has no name in ComponentNames, then the version will be shown on the status page + // without a name. 
+ ComponentNames map[uint64]string `yaml:"-" json:"-"` } type ringAccess interface { @@ -60,14 +77,14 @@ type ringAccess interface { type ringPageHandler struct { r ringAccess heartbeatTimeout time.Duration - disableTokens bool + config StatusPageConfig } -func newRingPageHandler(r ringAccess, heartbeatTimeout time.Duration, disableTokens bool) *ringPageHandler { +func newRingPageHandler(r ringAccess, heartbeatTimeout time.Duration, config StatusPageConfig) *ringPageHandler { return &ringPageHandler{ r: r, heartbeatTimeout: heartbeatTimeout, - disableTokens: disableTokens, + config: config, } } @@ -131,16 +148,17 @@ func (h *ringPageHandler) handle(w http.ResponseWriter, req *http.Request) { Zone: ing.Zone, NumTokens: len(ing.Tokens), Ownership: (float64(ownedTokens[id]) / float64(math.MaxUint32)) * 100, + Versions: ing.Versions, }) } tokensParam := req.URL.Query().Get("tokens") renderHTTPResponse(w, httpResponse{ - Ingesters: ingesters, - Now: now, - ShowTokens: tokensParam == "true", - DisableTokens: h.disableTokens, + Ingesters: ingesters, + Now: now, + TokensToggledOn: tokensParam == "true", + Config: h.config, }, defaultPageTemplate, req) } diff --git a/vendor/github.com/grafana/dskit/ring/ring_status.gohtml b/vendor/github.com/grafana/dskit/ring/ring_status.gohtml index 055873f3b499b..011f6a8db8a69 100644 --- a/vendor/github.com/grafana/dskit/ring/ring_status.gohtml +++ b/vendor/github.com/grafana/dskit/ring/ring_status.gohtml @@ -21,10 +21,13 @@ Read-Only Read-Only Updated Last Heartbeat - {{ if not .DisableTokens }} + {{ if not .Config.HideTokensUIElements }} Tokens Ownership {{ end }} + {{ if .Config.ShowVersions }} + Versions + {{ end }} Actions @@ -48,10 +51,21 @@ {{ .ReadOnlyUpdatedTimestamp | timeOrEmptyString }} {{ end }} {{ .HeartbeatTimestamp | durationSince }} ago ({{ .HeartbeatTimestamp.Format "15:04:05.999" }}) - {{ if not $.DisableTokens }} + {{ if not $.Config.HideTokensUIElements }} {{ .NumTokens }} {{ .Ownership | humanFloat }}% {{ end }} + {{ if $.Config.ShowVersions }} + + {{ range $k, $v := .Versions }} + {{ with $componentName := index $.Config.ComponentNames $k }} + {{ $k }} ({{ $componentName }}): v{{ $v }}
+ {{ else }} + {{ $k }}: v{{ $v }}
+ {{ end }} + {{ end }} + + {{ end }} @@ -61,14 +75,14 @@
- {{ if not .DisableTokens}} - {{ if .ShowTokens }} + {{ if not .Config.HideTokensUIElements }} + {{ if .TokensToggledOn }} {{ else }} {{ end }} - {{ if .ShowTokens }} + {{ if .TokensToggledOn }} {{ range $i, $ing := .Ingesters }}

Instance: {{ .ID }}

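The template and handler changes above expose per-instance component versions on the ring status page, driven by the new StatusPageConfig. A minimal sketch of how an embedding service might populate it, assuming the exported ring.StatusPageConfig shown in this diff (the component IDs and display names below are illustrative, not part of this change):

	package main

	import "github.com/grafana/dskit/ring"

	// exampleStatusPageConfig shows the new knobs: token UI elements stay visible,
	// the Versions column is rendered, and numeric component IDs found in
	// InstanceDesc.Versions are mapped to human-readable names for display.
	func exampleStatusPageConfig() ring.StatusPageConfig {
		return ring.StatusPageConfig{
			HideTokensUIElements: false, // this ring uses tokens, so keep the Tokens/Ownership columns
			ShowVersions:         true,  // render the new Versions column
			ComponentNames: map[uint64]string{ // display names; the IDs are illustrative
				1: "schema",
				2: "protocol",
			},
		}
	}

Components present in Versions but missing from ComponentNames render as a bare ID with the version, matching the template's {{ else }} branch above.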
diff --git a/vendor/github.com/grafana/dskit/runtimeconfig/manager.go b/vendor/github.com/grafana/dskit/runtimeconfig/manager.go index b7efa93c721e6..05a9f2fcd83b3 100644 --- a/vendor/github.com/grafana/dskit/runtimeconfig/manager.go +++ b/vendor/github.com/grafana/dskit/runtimeconfig/manager.go @@ -18,6 +18,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "go.uber.org/atomic" "gopkg.in/yaml.v3" "github.com/grafana/dskit/flagext" @@ -58,8 +59,7 @@ type Manager struct { listenersMtx sync.Mutex listeners []chan interface{} - configMtx sync.RWMutex - config interface{} + configPtr atomic.Pointer[interface{}] configLoadSuccess prometheus.Gauge configHash *prometheus.GaugeVec @@ -196,14 +196,18 @@ func (om *Manager) loadConfig() error { } mergedConfig := map[string]interface{}{} - for _, f := range om.cfg.LoadPath { + for i, f := range om.cfg.LoadPath { data := rawData[f] yamlFile, err := om.unmarshalMaybeGzipped(f, data) if err != nil { om.configLoadSuccess.Set(0) return errors.Wrapf(err, "unmarshal file %q", f) } - mergedConfig = mergeConfigMaps(mergedConfig, yamlFile) + mergedConfig, err = mergeConfigMaps(mergedConfig, yamlFile, "") + if err != nil { + om.configLoadSuccess.Set(0) + return errors.Wrapf(err, "can't merge file %q on top of the previous %#v", f, om.cfg.LoadPath[:i]) + } } buf, err := yaml.Marshal(mergedConfig) @@ -258,29 +262,50 @@ func isGzip(data []byte) bool { return len(data) > 2 && data[0] == 0x1f && data[1] == 0x8b } -func mergeConfigMaps(a, b map[string]interface{}) map[string]interface{} { +func mergeConfigMaps(a, b map[string]interface{}, path string) (_ map[string]interface{}, err error) { out := make(map[string]interface{}, len(a)) for k, v := range a { out[k] = v } for k, v := range b { + aVal, aHasKey := a[k] + bVal, bHasKey := b[k] + + _, aIsMap := a[k].(map[string]interface{}) + _, bIsMap := b[k].(map[string]interface{}) + + if aHasKey && aVal == nil && bIsMap { + aIsMap = true + out[k] = make(map[string]interface{}) + } + + if bHasKey && bVal == nil && aIsMap { + bIsMap = true + v = make(map[string]interface{}) + } + + if aHasKey && aIsMap != bIsMap { + return nil, errors.Errorf("conflicting types for %q: %T != %T", path+"."+k, a[k], b[k]) + } + if v, ok := v.(map[string]interface{}); ok { if bv, ok := out[k]; ok { if bv, ok := bv.(map[string]interface{}); ok { - out[k] = mergeConfigMaps(bv, v) + out[k], err = mergeConfigMaps(bv, v, path+"."+k) + if err != nil { + return nil, err + } continue } } } out[k] = v } - return out + return out, nil } func (om *Manager) setConfig(config interface{}) { - om.configMtx.Lock() - defer om.configMtx.Unlock() - om.config = config + om.configPtr.Store(&config) } func (om *Manager) callListeners(newValue interface{}) { @@ -311,8 +336,8 @@ func (om *Manager) stopping(_ error) error { // GetConfig returns last loaded config value, possibly nil. 
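// The value is stored behind an atomic.Pointer (written in setConfig above), so this read is a
// single lock-free atomic load; unlike the previous RWMutex version it cannot contend with
// concurrent config reloads.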
func (om *Manager) GetConfig() interface{} {
-	om.configMtx.RLock()
-	defer om.configMtx.RUnlock()
-
-	return om.config
+	if p := om.configPtr.Load(); p != nil {
+		return *p
+	}
+	return nil
 }
diff --git a/vendor/github.com/grafana/dskit/server/limits.go b/vendor/github.com/grafana/dskit/server/limits.go
index b9c9f3b3117af..7096d25fc24f6 100644
--- a/vendor/github.com/grafana/dskit/server/limits.go
+++ b/vendor/github.com/grafana/dskit/server/limits.go
@@ -3,13 +3,21 @@ package server
 import (
 	"context"
 	"strings"
+	"sync"
+	"time"
 
+	"github.com/go-kit/log"
+	"github.com/go-kit/log/level"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/tap"
 )
 
+// unprocessedRequestCheckTimeout is large enough for a normal request to start processing,
+// and small enough to clean up quickly if the request was cancelled and aborted early.
+const unprocessedRequestCheckTimeout = 10 * time.Second
+
 type GrpcInflightMethodLimiter interface {
 	// RPCCallStarting is called before the request has been read into memory.
 	// All that's known about the request at this point is the grpc method name.
@@ -26,12 +34,14 @@
 	RPCCallProcessing(ctx context.Context, methodName string) (func(error), error)
 
 	// RPCCallFinished is called when an RPC call is finished being handled.
+	// Under certain very rare race conditions it might be called before the actual request processing has finished.
 	RPCCallFinished(ctx context.Context)
 }
 
-func newGrpcInflightLimitCheck(methodLimiter GrpcInflightMethodLimiter) *grpcInflightLimitCheck {
+func newGrpcInflightLimitCheck(methodLimiter GrpcInflightMethodLimiter, logger log.Logger) *grpcInflightLimitCheck {
 	return &grpcInflightLimitCheck{
 		methodLimiter: methodLimiter,
+		logger:        logger,
 	}
 }
 
@@ -39,11 +49,17 @@ func newGrpcInflightLimitCheck(methodLimiter GrpcInflightMethodLimiter) *grpcInf
 // grpcInflightLimitCheck can track inflight requests, and reject requests before even reading them into memory.
 type grpcInflightLimitCheck struct {
 	methodLimiter GrpcInflightMethodLimiter
+
+	logger log.Logger
+
+	// Used to mock time.AfterFunc in tests.
+	timeAfterFuncMock func(d time.Duration, f func()) testableTimer
 }
 
 // TapHandle is called after receiving grpc request and headers, but before reading any request data yet.
 // If we reject request here (by returning non-nil error), it won't be counted towards any metrics (eg. in middleware.grpcStatsHandler).
-// If we accept request (no error), eventually HandleRPC with stats.End notification will be called.
+// If we accept the request (no error), the request should be processed and eventually HandleRPC with stats.End notification will be called,
+// unless the context is cancelled before we start processing the request.
 func (g *grpcInflightLimitCheck) TapHandle(ctx context.Context, info *tap.Info) (context.Context, error) {
 	if !isMethodNameValid(info.FullMethodName) {
 		// If method name is not valid, we let the request continue, but not call method limiter.
@@ -51,7 +67,61 @@ func (g *grpcInflightLimitCheck) TapHandle(ctx context.Context, info *tap.Info)
 		return ctx, nil
 	}
 
-	return g.methodLimiter.RPCCallStarting(ctx, info.FullMethodName, info.Header)
+	ctx, err := g.methodLimiter.RPCCallStarting(ctx, info.FullMethodName, info.Header)
+	if err != nil {
+		return ctx, err
+	}
+
+	// We called RPCCallStarting, so we need to ensure RPCCallFinished is called once the request is done.
+	// Because of a shortcut introduced in https://github.com/grpc/grpc-go/pull/8439 this may not happen.
+	// We could create a goroutine that would watch ctx.Done() and call RPCCallFinished if the context is done and we have not started processing the headers yet.
+	// However, that would mean paying the cost of an extra goroutine for every single gRPC request, just in case the request's context is cancelled before we start processing it.
+	// Instead of that we schedule a cheaper timer that we will cancel in the happy case, which will run after 10s and perform the cleanup only when needed.
+	state := &grpcInflightLimitCheckerState{
+		fullMethod:       info.FullMethodName,
+		timestamp:        time.Now(),
+		headersProcessed: make(chan struct{}),
+	}
+	state.nonProcessedRequestTimer = g.timeAfterFunc(unprocessedRequestCheckTimeout, g.checkProbablyEarlyAbortedRequest(ctx, state))
+
+	return context.WithValue(ctx, grpcInflightLimitCheckerStateKey{}, state), nil
+}
+
+func (g *grpcInflightLimitCheck) checkProbablyEarlyAbortedRequest(ctx context.Context, state *grpcInflightLimitCheckerState) func() {
+	return func() {
+		// If this function is running, we're in a corner case. Be very verbose in logging to help with debugging.
+		logger := state.logger(g.logger)
+
+		level.Warn(logger).Log("msg", "gRPC request processing didn't start within 10s of receiving, checking the context state")
+		select {
+		case <-ctx.Done():
+			level.Info(logger).Log("msg", "gRPC request context is done, assuming the request was cancelled before processing started, will call RPCCallFinished")
+		case <-state.headersProcessed:
+			level.Info(logger).Log("msg", "gRPC request processing has started, no need to call RPCCallFinished", "time_to_start_processing", time.Since(state.timestamp).String())
+			return
+		default:
+			level.Info(logger).Log("msg", "gRPC request context is not done and processing hasn't started, will wait until context is done or processing starts")
+
+			select {
+			case <-ctx.Done():
+				level.Info(logger).Log("msg", "gRPC request context is finally done, assuming the request was cancelled before processing started, will call RPCCallFinished")
+			case <-state.headersProcessed:
+				level.Info(logger).Log("msg", "gRPC request processing has finally started, no need to call RPCCallFinished", "time_to_start_processing", time.Since(state.timestamp).String())
+				return
+			}
+		}
+
+		called := false
+		state.rpcCallFinishedOnce.Do(func() {
+			called = true
+			g.methodLimiter.RPCCallFinished(ctx)
+		})
+		if called {
+			level.Info(logger).Log("msg", "called RPCCallFinished for gRPC request that never started processing")
+		} else {
+			level.Info(logger).Log("msg", "RPCCallFinished was already called for this gRPC request, no need to call it again")
+		}
+	}
 }
 
 func (g *grpcInflightLimitCheck) UnaryServerInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
@@ -78,18 +148,40 @@ func (g *grpcInflightLimitCheck) StreamServerInterceptor(srv interface{}, ss grp
 	}
 	return err
 }
-
 func (g *grpcInflightLimitCheck) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context {
 	return ctx
 }
 
 func (g *grpcInflightLimitCheck) HandleRPC(ctx context.Context, rpcStats stats.RPCStats) {
-	// when request ends, and we started "inflight" request tracking for it, finish it.
-	if _, ok := rpcStats.(*stats.End); !ok {
-		return
+	switch rpcStats.(type) {
+	case *stats.InHeader:
+		if state, ok := ctx.Value(grpcInflightLimitCheckerStateKey{}).(*grpcInflightLimitCheckerState); ok {
+			// We're processing this request, stop the timer.
+			if !state.nonProcessedRequestTimer.Stop() {
+				level.Warn(state.logger(g.logger)).Log("msg", "gRPC request processing has started, but the non-processing timer already fired, need to signal that we're processing it now")
+				// The timer has already expired, so the function is either executing or has executed.
+				//
+				// This (stats.InHeader) should be called once and only once, but gRPC is known for changing contracts
+				// and we don't want this to start panicking trying to close the channel multiple times,
+				// so a sync.Once doesn't hurt here.
+				state.headersProcessedOnce.Do(func() { close(state.headersProcessed) })
+			}
+		}
+
+	case *stats.End:
+		if state, ok := ctx.Value(grpcInflightLimitCheckerStateKey{}).(*grpcInflightLimitCheckerState); ok {
+			// We're done processing the request, but there's a scenario under which we may have already called RPCCallFinished
+			// from the timer callback scheduled in TapHandle, so we need to ensure it's called only once.
+			called := false
+			state.rpcCallFinishedOnce.Do(func() {
+				g.methodLimiter.RPCCallFinished(ctx)
+				called = true
+			})
+			if !called {
+				level.Warn(state.logger(g.logger)).Log("msg", "RPCCallFinished was already called for this gRPC request before the request actually finished processing")
+			}
+		}
 	}
-
-	g.methodLimiter.RPCCallFinished(ctx)
 }
 
 func (g *grpcInflightLimitCheck) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
@@ -100,6 +192,13 @@ func (g *grpcInflightLimitCheck) HandleConn(_ context.Context, _ stats.ConnStats
 	// Not interested.
 }
 
+func (g *grpcInflightLimitCheck) timeAfterFunc(d time.Duration, f func()) testableTimer {
+	if g.timeAfterFuncMock != nil {
+		return g.timeAfterFuncMock(d, f)
+	}
+	return testableTimer{timer: time.AfterFunc(d, f)}
+}
+
 // This function mimics the check in grpc library, server.go, handleStream method. handleStream method can stop processing early,
 // without calling stat handler if the method name is invalid.
 func isMethodNameValid(method string) bool {
@@ -109,3 +208,36 @@
 	pos := strings.LastIndex(method, "/")
 	return pos >= 0
 }
+
+type grpcInflightLimitCheckerStateKey struct{}
+
+type grpcInflightLimitCheckerState struct {
+	fullMethod string
+	timestamp  time.Time
+
+	nonProcessedRequestTimer testableTimer
+	headersProcessedOnce     sync.Once
+	headersProcessed         chan struct{}
+
+	rpcCallFinishedOnce sync.Once
+}
+
+func (state *grpcInflightLimitCheckerState) logger(baseLogger log.Logger) log.Logger {
+	return log.With(baseLogger, "method", state.fullMethod, "req_timestamp", state.timestamp.Format(time.RFC3339Nano))
+}
+
+type testableTimer struct {
+	// timer is what we use in production code.
+	timer *time.Timer
+
+	// stop is used in tests to mock timer stopping behavior.
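+	// Exactly one of timer and stop is expected to be set: Stop below prefers the real timer
+	// and falls back to the stop func, so a zero-value testableTimer must not be used.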
+	stop func() bool
+}
+
+func (t testableTimer) Stop() bool {
+	if t.timer != nil {
+		return t.timer.Stop()
+	}
+
+	return t.stop()
+}
diff --git a/vendor/github.com/grafana/dskit/server/metrics.go b/vendor/github.com/grafana/dskit/server/metrics.go
index 6378cd52e17bb..9339cda82c4cf 100644
--- a/vendor/github.com/grafana/dskit/server/metrics.go
+++ b/vendor/github.com/grafana/dskit/server/metrics.go
@@ -32,6 +32,10 @@ type Metrics struct {
 func NewServerMetrics(cfg Config) *Metrics {
 	reg := cfg.registererOrDefault()
 	factory := promauto.With(reg)
+	messageSizeNativeHistogramFactor := float64(0)
+	if cfg.MetricsMessageSizeNativeHistograms {
+		messageSizeNativeHistogramFactor = cfg.MetricsNativeHistogramFactor
+	}
 
 	return &Metrics{
 		TCPConnections: factory.NewGaugeVec(prometheus.GaugeOpts{
@@ -73,16 +77,22 @@ func NewServerMetrics(cfg Config) *Metrics {
 			Help: "Total count of requests for a particular tenant.",
 		}, []string{"method", "route", "status_code", "ws", "tenant"}),
 		ReceivedMessageSize: factory.NewHistogramVec(prometheus.HistogramOpts{
-			Namespace: cfg.MetricsNamespace,
-			Name:      "request_message_bytes",
-			Help:      "Size (in bytes) of messages received in the request.",
-			Buckets:   middleware.BodySizeBuckets,
+			Namespace:                       cfg.MetricsNamespace,
+			Name:                            "request_message_bytes",
+			Help:                            "Size (in bytes) of messages received in the request.",
+			Buckets:                         middleware.BodySizeBuckets,
+			NativeHistogramBucketFactor:     messageSizeNativeHistogramFactor,
+			NativeHistogramMaxBucketNumber:  100,
+			NativeHistogramMinResetDuration: time.Hour,
 		}, []string{"method", "route"}),
 		SentMessageSize: factory.NewHistogramVec(prometheus.HistogramOpts{
-			Namespace: cfg.MetricsNamespace,
-			Name:      "response_message_bytes",
-			Help:      "Size (in bytes) of messages sent in response.",
-			Buckets:   middleware.BodySizeBuckets,
+			Namespace:                       cfg.MetricsNamespace,
+			Name:                            "response_message_bytes",
+			Help:                            "Size (in bytes) of messages sent in response.",
+			Buckets:                         middleware.BodySizeBuckets,
+			NativeHistogramBucketFactor:     messageSizeNativeHistogramFactor,
+			NativeHistogramMaxBucketNumber:  100,
+			NativeHistogramMinResetDuration: time.Hour,
 		}, []string{"method", "route"}),
 		InflightRequests: factory.NewGaugeVec(prometheus.GaugeOpts{
 			Namespace: cfg.MetricsNamespace,
diff --git a/vendor/github.com/grafana/dskit/server/server.go b/vendor/github.com/grafana/dskit/server/server.go
index 02335b04acc69..ca2d5562f8198 100644
--- a/vendor/github.com/grafana/dskit/server/server.go
+++ b/vendor/github.com/grafana/dskit/server/server.go
@@ -85,6 +85,9 @@ type Config struct {
 	// https://pkg.go.dev/github.com/prometheus/client_golang/prometheus#HistogramOpts
 	// for details. A generally useful value is 1.1.
 	MetricsNativeHistogramFactor float64 `yaml:"-"`
+	// MetricsMessageSizeNativeHistograms enables use of MetricsNativeHistogramFactor for the
+	// response_message_bytes and request_message_bytes metrics.
+	MetricsMessageSizeNativeHistograms bool `yaml:"-"`
 
 	HTTPListenNetwork string `yaml:"http_listen_network"`
 	HTTPListenAddress string `yaml:"http_listen_address"`
@@ -436,7 +439,7 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) {
 	grpcMiddleware = append(grpcMiddleware, cfg.GRPCMiddleware...)
if cfg.ClusterValidation.GRPC.Enabled { grpcMiddleware = append(grpcMiddleware, middleware.ClusterUnaryServerInterceptor( - cfg.ClusterValidation.Label, cfg.ClusterValidation.GRPC.SoftValidation, + cfg.ClusterValidation.GetAllowedClusterLabels(), cfg.ClusterValidation.GRPC.SoftValidation, metrics.InvalidClusterRequests, logger, )) } @@ -471,7 +474,7 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) { var grpcServerLimit *grpcInflightLimitCheck if cfg.GrpcMethodLimiter != nil { - grpcServerLimit = newGrpcInflightLimitCheck(cfg.GrpcMethodLimiter) + grpcServerLimit = newGrpcInflightLimitCheck(cfg.GrpcMethodLimiter, logger) grpcMiddleware = append(grpcMiddleware, grpcServerLimit.UnaryServerInterceptor) grpcStreamMiddleware = append(grpcStreamMiddleware, grpcServerLimit.StreamServerInterceptor) } @@ -613,7 +616,7 @@ func BuildHTTPMiddleware(cfg Config, router *mux.Router, metrics *Metrics, logge } if cfg.ClusterValidation.HTTP.Enabled { httpMiddleware = append(httpMiddleware, middleware.ClusterValidationMiddleware( - cfg.ClusterValidation.Label, + cfg.ClusterValidation.GetAllowedClusterLabels(), cfg.ClusterValidation.HTTP, metrics.InvalidClusterRequests, logger, diff --git a/vendor/github.com/grafana/gomemcache/memcache/line_reader.go b/vendor/github.com/grafana/gomemcache/memcache/line_reader.go new file mode 100644 index 0000000000000..e959eb5f8574a --- /dev/null +++ b/vendor/github.com/grafana/gomemcache/memcache/line_reader.go @@ -0,0 +1,84 @@ +package memcache + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" +) + +type lineReader interface { + ReadLine(from io.Reader, lineLength int) ([]byte, error) +} + +type allocatingLineReader struct { + allocator Allocator +} + +func (s allocatingLineReader) ReadLine(from io.Reader, lineLength int) ([]byte, error) { + // Note that lineLength MUST account for the trailing \r\n. + if lineLength < len(crlf) { + return nil, errors.New("line length too small: must include CRLF") + } + + // Get can return a larger buffer than requested, but never smaller. 
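+	// (Illustrative note: a pooling allocator might return, say, a 4KiB chunk for a
+	// 100-byte request, so we re-slice to exactly lineLength below to keep ReadFull
+	// and the CRLF check on this line's bytes only.)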
+ buff := s.allocator.Get(lineLength) + + destBuf := (*buff)[:lineLength] + _, err := io.ReadFull(from, destBuf) + if err != nil { + s.allocator.Put(buff) + return nil, fmt.Errorf("failed to read line: %w", err) + } + if !bytes.HasSuffix(destBuf, crlf) { + s.allocator.Put(buff) + return nil, fmt.Errorf("line is not followed by CRLF") + } + return destBuf[:lineLength-len(crlf)], nil +} + +type noopLineReader struct{} + +func (s noopLineReader) ReadLine(from io.Reader, lineLength int) ([]byte, error) { + _, err := io.CopyN(io.Discard, from, int64(lineLength)) + if err != nil { + return nil, fmt.Errorf("discarding line: %w", err) + } + return nil, nil +} + +func tryDiscardLines(r *bufio.Reader) error { + for { + _, err := readLine(r, noopLineReader{}) + if errors.Is(err, io.EOF) { + return nil + } else if err != nil { + return fmt.Errorf("memcache GetMulti: discarding cancelled response: %w", err) + } + } +} + +func readLine[R lineReader](r *bufio.Reader, buff R) (*Item, error) { + line, err := r.ReadSlice('\n') + if err != nil { + return nil, err + } + if bytes.Equal(line, resultEnd) { + return nil, io.EOF + } + it := new(Item) + size, err := scanGetResponseLine(line, it) + if err != nil { + return nil, err + } + + // Expect the line to end with \r\n + readSize := size + len(crlf) + + it.Value, err = buff.ReadLine(r, readSize) + if err != nil { + return nil, fmt.Errorf("memcache: corrupt get result: %w", err) + } + return it, nil +} diff --git a/vendor/github.com/grafana/gomemcache/memcache/memcache.go b/vendor/github.com/grafana/gomemcache/memcache/memcache.go index 37039ba4cd287..30aa3cb3b31db 100644 --- a/vendor/github.com/grafana/gomemcache/memcache/memcache.go +++ b/vendor/github.com/grafana/gomemcache/memcache/memcache.go @@ -236,6 +236,10 @@ func (cn *conn) extendDeadline() { _ = cn.nc.SetDeadline(time.Now().Add(cn.c.netTimeout())) } +func (cn *conn) extendDeadlineLong() { + _ = cn.nc.SetDeadline(time.Now().Add(5 * cn.c.netTimeout())) +} + // condRelease releases this connection if the error pointed to by err // is nil (not an error) or is only a protocol level error (e.g. a // cache miss). 
The purpose is to not recycle TCP connections that @@ -630,74 +634,52 @@ func (c *Client) GetMulti(ctx context.Context, keys []string, opts ...Option) (m ch := make(chan error, buffered) for addr, keys := range keyMap { go func(addr net.Addr, keys []string) { - err := c.getFromAddr(ctx, addr, keys, options, addItemToMap) - select { - case ch <- err: - case <-ctx.Done(): - } + ch <- c.getFromAddr(ctx, addr, keys, options, addItemToMap) }(addr, keys) } var err error for i := 0; i < len(keyMap); i++ { - select { - case ge := <-ch: - if ge != nil { - err = ge - } - case <-ctx.Done(): - return nil, fmt.Errorf("memcache GetMulti: %w", ctx.Err()) + ge := <-ch + if ge != nil { + err = ge } } + if ctx.Err() != nil { + return nil, fmt.Errorf("memcache GetMulti: %w", ctx.Err()) + } return m, err } // parseGetResponse reads a GET response from r and calls cb for each // read and allocated Item func (c *Client) parseGetResponse(ctx context.Context, r *bufio.Reader, conn *conn, opts *Options, cb func(*Item)) error { + lineReader := allocatingLineReader{ + allocator: opts.Alloc, + } for { - // extend deadline before each additional call, otherwise all cumulative calls use the same overall deadline - conn.extendDeadline() - line, err := r.ReadSlice('\n') - - if err != nil { - return err - } - if bytes.Equal(line, resultEnd) { - return nil - } - it := new(Item) - size, err := scanGetResponseLine(line, it) - if err != nil { - return err - } - buffSize := size + 2 - // Check if context is cancelled before allocating memory select { case <-ctx.Done(): - // Still need to read the data to keep connection in valid state - _, err = io.CopyN(io.Discard, r, int64(buffSize)) + // Try to discard the rest of the response to keep the connection in a good state + // We don't want to block forever here, so use a longer deadline than usual, but don't renew it on every item read. + conn.extendDeadlineLong() + err := tryDiscardLines(r) if err != nil { - return err + return fmt.Errorf("memcache GetMulti: %w %w", ctx.Err(), err) } - // Continue reading without processing to maintain connection state - continue + return nil default: } - buff := opts.Alloc.Get(buffSize) - it.Value = (*buff)[:buffSize] - _, err = io.ReadFull(r, it.Value) - if err != nil { - opts.Alloc.Put(buff) + // extend deadline before each additional call, otherwise all cumulative calls use the same overall deadline + conn.extendDeadline() + it, err := readLine(r, lineReader) + if errors.Is(err, io.EOF) { + return nil + } else if err != nil { return err } - if !bytes.HasSuffix(it.Value, crlf) { - opts.Alloc.Put(buff) - return fmt.Errorf("memcache: corrupt get result read") - } - it.Value = it.Value[:size] cb(it) } } diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/block.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/block.go index 349c77a97c96f..edfa1d54bebef 100644 --- a/vendor/github.com/grafana/pyroscope-go/godeltaprof/block.go +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/block.go @@ -10,7 +10,8 @@ import ( ) // BlockProfiler is a stateful profiler for goroutine blocking events and mutex contention in Go programs. -// Depending on the function used to create the BlockProfiler, it uses either runtime.BlockProfile or runtime.MutexProfile. +// Depending on the function used to create the BlockProfiler, it uses either runtime.BlockProfile or +// runtime.MutexProfile. // The BlockProfiler provides similar functionality to pprof.Lookup("block").WriteTo and pprof.Lookup("mutex").WriteTo, // but with some key differences. 
// @@ -29,6 +30,7 @@ type BlockProfiler struct { runtimeProfile func([]runtime.BlockProfileRecord) (int, bool) scaleProfile pprof.MutexProfileScaler options pprof.ProfileBuilderOptions + gz gz } // NewMutexProfiler creates a new BlockProfiler instance for profiling mutex contention. @@ -100,19 +102,23 @@ func (d *BlockProfiler) Profile(w io.Writer) error { defer d.mutex.Unlock() var p []runtime.BlockProfileRecord - n, ok := d.runtimeProfile(nil) + var ok bool + n, _ := d.runtimeProfile(nil) for { p = make([]runtime.BlockProfileRecord, n+50) n, ok = d.runtimeProfile(p) if ok { p = p[:n] + break } } sort.Slice(p, func(i, j int) bool { return p[i].Cycles > p[j].Cycles }) + zw := d.gz.get(w) stc := pprof.MutexProfileConfig() - b := pprof.NewProfileBuilder(w, &d.options, stc) + b := pprof.NewProfileBuilder(w, zw, &d.options, stc) + return d.impl.PrintCountCycleProfile(b, d.scaleProfile, p) } diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/gzip.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/gzip.go new file mode 100644 index 0000000000000..5ecbf1805a391 --- /dev/null +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/gzip.go @@ -0,0 +1,21 @@ +package godeltaprof + +import ( + "io" + + "github.com/klauspost/compress/gzip" +) + +type gz struct { + w *gzip.Writer +} + +func (g *gz) get(w io.Writer) *gzip.Writer { + if g.w == nil { + zw, _ := gzip.NewWriterLevel(w, gzip.BestSpeed) + g.w = zw + } + g.w.Reset(w) + + return g.w +} diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/heap.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/heap.go index 964b8ad6cfe95..07f2fce16dad5 100644 --- a/vendor/github.com/grafana/pyroscope-go/godeltaprof/heap.go +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/heap.go @@ -31,6 +31,7 @@ type HeapProfiler struct { impl pprof.DeltaHeapProfiler mutex sync.Mutex options pprof.ProfileBuilderOptions + gz gz } func NewHeapProfiler() *HeapProfiler { @@ -63,7 +64,8 @@ func (d *HeapProfiler) Profile(w io.Writer) error { // and also try again if we're very unlucky. // The loop should only execute one iteration in the common case. var p []runtime.MemProfileRecord - n, ok := runtime.MemProfile(nil, true) + var ok bool + n, _ := runtime.MemProfile(nil, true) for { // Allocate room for a slightly bigger profile, // in case a few more entries have been added @@ -72,11 +74,15 @@ func (d *HeapProfiler) Profile(w io.Writer) error { n, ok = runtime.MemProfile(p, true) if ok { p = p[0:n] + break } // Profile grew; try again. 
} rate := int64(runtime.MemProfileRate) - b := pprof.NewProfileBuilder(w, &d.options, pprof.HeapProfileConfig(rate)) + + zw := d.gz.get(w) + b := pprof.NewProfileBuilder(w, zw, &d.options, pprof.HeapProfileConfig(rate)) + return d.impl.WriteHeapProto(b, p, rate) } diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/http/pprof/pprof.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/http/pprof/pprof.go index 5adaf00541f37..6789a1d4d6778 100644 --- a/vendor/github.com/grafana/pyroscope-go/godeltaprof/http/pprof/pprof.go +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/http/pprof/pprof.go @@ -11,9 +11,9 @@ import ( ) var ( - deltaHeapProfiler = godeltaprof.NewHeapProfiler() - deltaBlockProfiler = godeltaprof.NewBlockProfiler() - deltaMutexProfiler = godeltaprof.NewMutexProfiler() + deltaHeapProfiler = godeltaprof.NewHeapProfiler() //nolint:gochecknoglobals + deltaBlockProfiler = godeltaprof.NewBlockProfiler() //nolint:gochecknoglobals + deltaMutexProfiler = godeltaprof.NewMutexProfiler() //nolint:gochecknoglobals ) type deltaProfiler interface { diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/delta_heap.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/delta_heap.go index 883d02008f76a..0bba813a18b16 100644 --- a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/delta_heap.go +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/delta_heap.go @@ -17,10 +17,12 @@ type heapAccValue struct { type DeltaHeapProfiler struct { m profMap[heapPrevValue, heapAccValue] - //todo consider adding an option to remove block size label and merge allocations of different size + // todo consider adding an option to remove block size label and merge allocations of different size } // WriteHeapProto writes the current heap profile in protobuf format to w. +// +//nolint:gocognit func (d *DeltaHeapProfiler) WriteHeapProto(b ProfileBuilder, p []runtime.MemProfileRecord, rate int64) error { values := []int64{0, 0, 0, 0} var locs []uint64 @@ -90,6 +92,7 @@ func (d *DeltaHeapProfiler) WriteHeapProto(b ProfileBuilder, p []runtime.MemProf } // Found non-runtime. Show any runtime uses above it. stk = stk[i:] + break } } @@ -103,6 +106,7 @@ func (d *DeltaHeapProfiler) WriteHeapProto(b ProfileBuilder, p []runtime.MemProf b.Sample(values, locs, blockSize) } b.Build() + return nil } diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/delta_mutex.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/delta_mutex.go index 5c177e3fca880..cf91722f2a5fe 100644 --- a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/delta_mutex.go +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/delta_mutex.go @@ -23,8 +23,8 @@ type DeltaMutexProfiler struct { // are done because The proto expects count and time (nanoseconds) instead of count // and the number of cycles for block, contention profiles. // Possible 'scaler' functions are scaleBlockProfile and scaleMutexProfile. 
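// As a worked example with illustrative numbers: if runtime_cyclesPerSecond() reports 3e9,
// cpuGHz below is 3.0 (cycles per nanosecond), so a record carrying 9e6 contention cycles
// is reported as 9e6/3.0 = 3e6 nanoseconds.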
-func (d *DeltaMutexProfiler) PrintCountCycleProfile(b ProfileBuilder, scaler MutexProfileScaler, records []runtime.BlockProfileRecord) error { - +func (d *DeltaMutexProfiler) PrintCountCycleProfile(b ProfileBuilder, scaler MutexProfileScaler, + records []runtime.BlockProfileRecord) error { cpuGHz := float64(runtime_cyclesPerSecond()) / 1e9 values := []int64{0, 0} @@ -70,6 +70,7 @@ func (d *DeltaMutexProfiler) PrintCountCycleProfile(b ProfileBuilder, scaler Mut b.Sample(values, locs, 0) } b.Build() + return nil } diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/elf.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/elf.go index a8b5ea68175f9..9840030e8b588 100644 --- a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/elf.go +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/elf.go @@ -6,8 +6,8 @@ package pprof import ( "encoding/binary" + "encoding/hex" "errors" - "fmt" "os" ) @@ -18,13 +18,17 @@ var ( // elfBuildID returns the GNU build ID of the named ELF binary, // without introducing a dependency on debug/elf and its dependencies. +// +//nolint:gocognit func elfBuildID(file string) (string, error) { buf := make([]byte, 256) - f, err := os.Open(file) + f, err := os.Open(file) //nolint:gosec if err != nil { return "", err } - defer f.Close() + defer func() { + _ = f.Close() + }() if _, err := f.ReadAt(buf[:64], 0); err != nil { return "", err @@ -58,7 +62,7 @@ func elfBuildID(file string) (string, error) { } shnum = int(byteOrder.Uint16(buf[48:])) case 2: // 64-bit file header - shoff = int64(byteOrder.Uint64(buf[40:])) + shoff = int64(byteOrder.Uint64(buf[40:])) //nolint:gosec shentsize = int64(byteOrder.Uint16(buf[58:])) if shentsize != 64 { return "", errBadELF @@ -80,8 +84,8 @@ func elfBuildID(file string) (string, error) { size = int64(byteOrder.Uint32(buf[20:])) } else { // 64-bit section header - off = int64(byteOrder.Uint64(buf[24:])) - size = int64(byteOrder.Uint64(buf[32:])) + off = int64(byteOrder.Uint64(buf[24:])) //nolint:gosec + size = int64(byteOrder.Uint64(buf[32:])) //nolint:gosec } size += off for off < size { @@ -93,7 +97,7 @@ func elfBuildID(file string) (string, error) { noteType := int(byteOrder.Uint32(buf[8:])) descOff := off + int64(12+(nameSize+3)&^3) off = descOff + int64((descSize+3)&^3) - if nameSize != 4 || noteType != 3 || buf[12] != 'G' || buf[13] != 'N' || buf[14] != 'U' || buf[15] != '\x00' { // want name GNU\x00 type 3 (NT_GNU_BUILD_ID) + if nameSize != 4 || noteType != 3 || buf[12] != 'G' || buf[13] != 'N' || buf[14] != 'U' || buf[15] != '\x00' { //nolint:lll // want name GNU\x00 type 3 (NT_GNU_BUILD_ID) continue } if descSize > len(buf) { @@ -102,8 +106,10 @@ func elfBuildID(file string) (string, error) { if _, err := f.ReadAt(buf[:descSize], descOff); err != nil { return "", err } - return fmt.Sprintf("%x", buf[:descSize]), nil + + return hex.EncodeToString(buf[:descSize]), nil } } + return "", errNoBuildID } diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/gzip_go16.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/gzip_go16.go deleted file mode 100644 index 4992f7bfd68d9..0000000000000 --- a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/gzip_go16.go +++ /dev/null @@ -1,18 +0,0 @@ -//go:build go1.16 && !go1.17 -// +build go1.16,!go1.17 - -package pprof - -import ( - "compress/gzip" - "io" -) - -type gzipWriter struct { - *gzip.Writer -} - -func newGzipWriter(w io.Writer) gzipWriter { - zw, _ := 
gzip.NewWriterLevel(w, gzip.BestSpeed) - return gzipWriter{zw} -} diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/gzip_go17.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/gzip_go17.go deleted file mode 100644 index a5a51c0fe9f48..0000000000000 --- a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/gzip_go17.go +++ /dev/null @@ -1,19 +0,0 @@ -//go:build go1.17 -// +build go1.17 - -package pprof - -import ( - "io" - - "github.com/klauspost/compress/gzip" -) - -type gzipWriter struct { - *gzip.Writer -} - -func newGzipWriter(w io.Writer) gzipWriter { - zw, _ := gzip.NewWriterLevel(w, gzip.BestSpeed) - return gzipWriter{zw} -} diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/map.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/map.go index dcb6e569c84b3..921ca8d7d4079 100644 --- a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/map.go +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/map.go @@ -29,10 +29,10 @@ func (m *profMap[PREV, ACC]) Lookup(stk []uintptr, tag uintptr) *profMapEntry[PR h := uintptr(0) for _, x := range stk { h = h<<8 | (h >> (8 * (unsafe.Sizeof(h) - 1))) - h += uintptr(x) * 41 + h += x * 41 } h = h<<8 | (h >> (8 * (unsafe.Sizeof(h) - 1))) - h += uintptr(tag) * 41 + h += tag * 41 // Find entry if present. var last *profMapEntry[PREV, ACC] @@ -42,20 +42,21 @@ Search: continue } for j := range stk { - if e.stk[j] != uintptr(stk[j]) { + if e.stk[j] != stk[j] { continue Search } } - // Move to front. + // Move to the front. if last != nil { last.nextHash = e.nextHash e.nextHash = m.hash[h] m.hash[h] = e } + return e } - // Add new entry. + // Add a new entry. if len(m.free) < 1 { m.free = make([]profMapEntry[PREV, ACC], 128) } @@ -71,12 +72,11 @@ Search: e.stk = m.freeStk[:len(stk):len(stk)] m.freeStk = m.freeStk[len(stk):] - for j := range stk { - e.stk[j] = uintptr(stk[j]) - } + copy(e.stk, stk) if m.hash == nil { m.hash = make(map[uintptr]*profMapEntry[PREV, ACC]) } m.hash[h] = e + return e } diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/mutex_scale_go20.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/mutex_scale_go20.go index fc5020777c2d6..5741ba1ce8abb 100644 --- a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/mutex_scale_go20.go +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/mutex_scale_go20.go @@ -12,6 +12,6 @@ func ScaleMutexProfile(_ MutexProfileScaler, cnt int64, ns float64) (int64, floa return cnt, ns } -var ScalerMutexProfile = MutexProfileScaler{} +var ScalerMutexProfile = MutexProfileScaler{} //nolint:gochecknoglobals -var ScalerBlockProfile = MutexProfileScaler{} +var ScalerBlockProfile = MutexProfileScaler{} //nolint:gochecknoglobals diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/proto.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/proto.go index 78246b0fbbe1f..8fdea5a93bc12 100644 --- a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/proto.go +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/proto.go @@ -12,17 +12,14 @@ import ( "strconv" "strings" "time" -) -// lostProfileEvent is the function to which lost profiling -// events are attributed. -// (The name shows up in the pprof graphs.) 
-func lostProfileEvent() { lostProfileEvent() } + "github.com/klauspost/compress/gzip" +) type ProfileBuilderOptions struct { // for go1.21+ if true - use runtime_FrameSymbolName - produces frames with generic types, for example [go.shape.int] - // for go1.21+ if false - use runtime.Frame->Function - produces frames with generic types ommited [...] - // pre 1.21 - always use runtime.Frame->Function - produces frames with generic types ommited [...] + // for go1.21+ if false - use runtime.Frame->Function - produces frames with generic types omitted [...] + // pre 1.21 - always use runtime.Frame->Function - produces frames with generic types omitted [...] GenericsFrames bool LazyMapping bool mem []memMap @@ -32,6 +29,7 @@ func (d *ProfileBuilderOptions) mapping() []memMap { if d.mem == nil || !d.LazyMapping { d.mem = readMapping() } + return d.mem } @@ -45,7 +43,7 @@ type profileBuilder struct { // encoding state w io.Writer - zw gzipWriter + zw *gzip.Writer pb protobuf strings []string stringMap map[string]int @@ -152,13 +150,14 @@ func (b *profileBuilder) stringIndex(s string) int64 { b.strings = append(b.strings, s) b.stringMap[s] = id } + return int64(id) } func (b *profileBuilder) flush() { const dataFlush = 4096 if b.pb.nest == 0 && len(b.pb.data) > dataFlush { - b.zw.Write(b.pb.data) + _, _ = b.zw.Write(b.pb.data) b.pb.data = b.pb.data[:0] } } @@ -249,6 +248,7 @@ func allFrames(addr uintptr) ([]runtime.Frame, symbolizeFlag) { frame, more = frames.Next() ret = append(ret, frame) } + return ret, symbolizeResult } @@ -271,8 +271,7 @@ type locInfo struct { // CPU profiling data obtained from the runtime can be added // by calling b.addCPUData, and then the eventual profile // can be obtained by calling b.finish. -func NewProfileBuilder(w io.Writer, opt *ProfileBuilderOptions, stc ProfileConfig) ProfileBuilder { - zw := newGzipWriter(w) +func NewProfileBuilder(w io.Writer, zw *gzip.Writer, opt *ProfileBuilderOptions, stc ProfileConfig) ProfileBuilder { b := &profileBuilder{ w: w, zw: zw, @@ -293,6 +292,7 @@ func NewProfileBuilder(w io.Writer, opt *ProfileBuilderOptions, stc ProfileConfi if stc.DefaultSampleType != "" { b.pb.int64Opt(tagProfile_DefaultSampleType, b.stringIndex(stc.DefaultSampleType)) } + return b } @@ -310,16 +310,16 @@ func (b *profileBuilder) Build() { } for i, m := range b.mem { - hasFunctions := m.funcs == lookupTried // lookupTried but not lookupFailed - b.pbMapping(tagProfile_Mapping, uint64(i+1), uint64(m.start), uint64(m.end), m.offset, m.file, m.buildID, hasFunctions) + hasFunctions := m.funcs == lookupTried //nolint:lll // lookupTried but not lookupFailed + b.pbMapping(tagProfile_Mapping, uint64(i+1), uint64(m.start), uint64(m.end), m.offset, m.file, m.buildID, hasFunctions) //nolint:lll,gosec } // TODO: Anything for tagProfile_DropFrames? // TODO: Anything for tagProfile_KeepFrames? b.pb.strings(tagProfile_StringTable, b.strings) - b.zw.Write(b.pb.data) - b.zw.Close() + _, _ = b.zw.Write(b.pb.data) + _ = b.zw.Close() } // LocsForStack appends the location IDs for the given stack trace to the given @@ -355,6 +355,7 @@ func (b *profileBuilder) LocsForStack(stk []uintptr) (newLocs []uint64) { if len(b.deck.pcs) > 0 { if added := b.deck.tryAdd(addr, l.firstPCFrames, l.firstPCSymbolizeResult); added { stk = stk[1:] + continue } } @@ -373,6 +374,7 @@ func (b *profileBuilder) LocsForStack(stk []uintptr) (newLocs []uint64) { // limit, expandFinalInlineFrame above has already // fixed the truncation, ensuring it is long enough. 
stk = stk[len(l.pcs):] + continue } @@ -382,11 +384,13 @@ func (b *profileBuilder) LocsForStack(stk []uintptr) (newLocs []uint64) { locs = append(locs, id) } stk = stk[1:] + continue } if added := b.deck.tryAdd(addr, frames, symbolizeResult); added { stk = stk[1:] + continue } // add failed because this addr is not inlined with the @@ -408,6 +412,7 @@ func (b *profileBuilder) LocsForStack(stk []uintptr) (newLocs []uint64) { if id := b.emitLocation(); id > 0 { // emit remaining location. locs = append(locs, id) } + return locs } @@ -447,8 +452,9 @@ func (b *profileBuilder) LocsForStack(stk []uintptr) (newLocs []uint64) { // have the following properties: // // Frame's Func is nil (note: also true for non-Go functions), and -// Frame's Entry matches its entry function frame's Entry (note: could also be true for recursive calls and non-Go functions), and -// Frame's Name does not match its entry function frame's name (note: inlined functions cannot be directly recursive). +// Frame's Entry matches its entry function frame's Entry (note: could also be true for recursive calls and non-Go +// functions), and Frame's Name does not match its entry function frame's name (note: inlined functions cannot be +// directly recursive). // // As reading and processing the pcs in a stack trace one by one (from leaf to the root), // we use pcDeck to temporarily hold the observed pcs and their expanded frames @@ -504,6 +510,7 @@ func (d *pcDeck) tryAdd(pc uintptr, frames []runtime.Frame, symbolizeResult symb d.firstPCFrames = len(d.frames) d.firstPCSymbolizeResult = symbolizeResult } + return true } @@ -541,13 +548,14 @@ func (b *profileBuilder) emitLocation() uint64 { start := b.pb.startMessage() b.pb.uint64Opt(tagLocation_ID, id) b.pb.uint64Opt(tagLocation_Address, uint64(firstFrame.PC)) - for _, frame := range b.deck.frames { + for k := range b.deck.frames { + frame := &b.deck.frames[k] // Write out each line in frame expansion. 
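		// (The loop above indexes the deck and takes &b.deck.frames[k], so the runtime_Frame*
		// helpers below receive a pointer to the stored frame rather than to a loop-variable copy.)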
- funcName := runtime_FrameSymbolName(&frame) - funcID := uint64(b.funcs[funcName]) + funcName := runtime_FrameSymbolName(frame) + funcID := uint64(b.funcs[funcName]) //nolint:gosec if funcID == 0 { funcID = uint64(len(b.funcs)) + 1 - b.funcs[funcName] = int(funcID) + b.funcs[funcName] = int(funcID) //nolint:gosec var name string if b.opt.GenericsFrames { name = funcName @@ -558,18 +566,19 @@ func (b *profileBuilder) emitLocation() uint64 { id: funcID, name: name, file: frame.File, - startLine: int64(runtime_FrameStartLine(&frame)), + startLine: int64(runtime_FrameStartLine(frame)), }) } b.pbLine(tagLocation_Line, funcID, int64(frame.Line)) } for i := range b.mem { if b.mem[i].start <= addr && addr < b.mem[i].end || b.mem[i].fake { - b.pb.uint64Opt(tagLocation_MappingID, uint64(i+1)) + b.pb.uint64Opt(tagLocation_MappingID, uint64(i+1)) //nolint:gosec m := b.mem[i] m.funcs |= b.deck.symbolizeResult b.mem[i] = m + break } } @@ -587,6 +596,7 @@ func (b *profileBuilder) emitLocation() uint64 { } b.flush() + return id } @@ -613,11 +623,12 @@ func readMapping() []memMap { fake: true, }} } + return mem } -var space = []byte(" ") -var newline = []byte("\n") +var space = []byte(" ") //nolint:gochecknoglobals +var newline = []byte("\n") //nolint:gochecknoglobals func parseProcSelfMaps(data []byte, addMapping func(lo, hi, offset uint64, file, buildID string)) { // $ cat /proc/self/maps @@ -648,6 +659,7 @@ func parseProcSelfMaps(data []byte, addMapping func(lo, hi, offset uint64, file, var f []byte f, line, _ = bytesCut(line, space) line = bytes.TrimLeft(line, " ") + return f } @@ -716,10 +728,13 @@ func parseProcSelfMaps(data []byte, addMapping func(lo, hi, offset uint64, file, // If sep does not appear in s, cut returns s, nil, false. // // Cut returns slices of the original slice s, not copies. 
+// +//nolint:unparam func bytesCut(s, sep []byte) (before, after []byte, found bool) { if i := bytes.Index(s, sep); i >= 0 { return s[:i], s[i+len(sep):], true } + return s, nil, false } @@ -731,5 +746,6 @@ func stringsCut(s, sep string) (before, after string, found bool) { if i := strings.Index(s, sep); i >= 0 { return s[:i], s[i+len(sep):], true } + return s, "", false } diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/protobuf.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/protobuf.go index 7b99095a13a62..d450862e5ba15 100644 --- a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/protobuf.go +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/protobuf.go @@ -19,14 +19,14 @@ func (b *protobuf) varint(x uint64) { b.data = append(b.data, byte(x)) } -func (b *protobuf) length(tag int, len int) { - b.varint(uint64(tag)<<3 | 2) - b.varint(uint64(len)) +func (b *protobuf) length(tag int, l int) { + b.varint(uint64(tag)<<3 | 2) //nolint:gosec + b.varint(uint64(l)) //nolint:gosec } func (b *protobuf) uint64(tag int, x uint64) { // append varint to b.data - b.varint(uint64(tag)<<3 | 0) + b.varint(uint64(tag) << 3) //nolint:gosec b.varint(x) } @@ -43,6 +43,7 @@ func (b *protobuf) uint64s(tag int, x []uint64) { copy(b.tmp[:], b.data[n2:n3]) copy(b.data[n1+(n3-n2):], b.data[n1:n2]) copy(b.data[n1:], b.tmp[:n3-n2]) + return } for _, u := range x { @@ -58,7 +59,7 @@ func (b *protobuf) uint64Opt(tag int, x uint64) { } func (b *protobuf) int64(tag int, x int64) { - u := uint64(x) + u := uint64(x) //nolint:gosec b.uint64(tag, u) } @@ -74,7 +75,7 @@ func (b *protobuf) int64s(tag int, x []int64) { // Use packed encoding n1 := len(b.data) for _, u := range x { - b.varint(uint64(u)) + b.varint(uint64(u)) //nolint:gosec } n2 := len(b.data) b.length(tag, n2-n1) @@ -82,6 +83,7 @@ func (b *protobuf) int64s(tag int, x []int64) { copy(b.tmp[:], b.data[n2:n3]) copy(b.data[n1+(n3-n2):], b.data[n1:n2]) copy(b.data[n1:], b.tmp[:n3-n2]) + return } for _, u := range x { @@ -100,13 +102,6 @@ func (b *protobuf) strings(tag int, x []string) { } } -func (b *protobuf) stringOpt(tag int, x string) { - if x == "" { - return - } - b.string(tag, x) -} - func (b *protobuf) bool(tag int, x bool) { if x { b.uint64(tag, 1) @@ -115,17 +110,11 @@ func (b *protobuf) bool(tag int, x bool) { } } -func (b *protobuf) boolOpt(tag int, x bool) { - if x == false { - return - } - b.bool(tag, x) -} - type msgOffset int func (b *protobuf) startMessage() msgOffset { b.nest++ + return msgOffset(len(b.data)) } diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/proto.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/proto.go index 2107389419c5e..b499124142afe 100644 --- a/vendor/github.com/grafana/pyroscope-go/godeltaprof/proto.go +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/proto.go @@ -2,8 +2,8 @@ package godeltaprof type ProfileOptions struct { // for go1.21+ if true - use runtime_FrameSymbolName - produces frames with generic types, for example [go.shape.int] - // for go1.21+ if false - use runtime.Frame->Function - produces frames with generic types ommited [...] - // pre 1.21 - always use runtime.Frame->Function - produces frames with generic types ommited [...] + // for go1.21+ if false - use runtime.Frame->Function - produces frames with generic types omitted [...] + // pre 1.21 - always use runtime.Frame->Function - produces frames with generic types omitted [...] 
GenericsFrames bool LazyMappings bool } diff --git a/vendor/github.com/hashicorp/go-msgpack/LICENSE b/vendor/github.com/hashicorp/go-msgpack/LICENSE deleted file mode 100644 index ccae99f6a9a30..0000000000000 --- a/vendor/github.com/hashicorp/go-msgpack/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2012, 2013 Ugorji Nwoke. -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. -* Neither the name of the author nor the names of its contributors may be used - to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go b/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go deleted file mode 100644 index c14d810a73e81..0000000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -/* -High Performance, Feature-Rich Idiomatic Go encoding library for msgpack and binc . - -Supported Serialization formats are: - - - msgpack: [https://github.com/msgpack/msgpack] - - binc: [http://github.com/ugorji/binc] - -To install: - - go get github.com/ugorji/go/codec - -The idiomatic Go support is as seen in other encoding packages in -the standard library (ie json, xml, gob, etc). - -Rich Feature Set includes: - - - Simple but extremely powerful and feature-rich API - - Very High Performance. - Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X. - This was achieved by taking extreme care on: - - managing allocation - - function frame size (important due to Go's use of split stacks), - - reflection use (and by-passing reflection for common types) - - recursion implications - - zero-copy mode (encoding/decoding to byte slice without using temp buffers) - - Correct. - Care was taken to precisely handle corner cases like: - overflows, nil maps and slices, nil value in stream, etc. - - Efficient zero-copying into temporary byte buffers - when encoding into or decoding from a byte slice. 
- - Standard field renaming via tags - - Encoding from any value - (struct, slice, map, primitives, pointers, interface{}, etc) - - Decoding into pointer to any non-nil typed value - (struct, slice, map, int, float32, bool, string, reflect.Value, etc) - - Supports extension functions to handle the encode/decode of custom types - - Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler - - Schema-less decoding - (decode into a pointer to a nil interface{} as opposed to a typed non-nil value). - Includes Options to configure what specific map or slice type to use - when decoding an encoded list or map into a nil interface{} - - Provides a RPC Server and Client Codec for net/rpc communication protocol. - - Msgpack Specific: - - Provides extension functions to handle spec-defined extensions (binary, timestamp) - - Options to resolve ambiguities in handling raw bytes (as string or []byte) - during schema-less decoding (decoding into a nil interface{}) - - RPC Server/Client Codec for msgpack-rpc protocol defined at: - https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md - - Fast Paths for some container types: - For some container types, we circumvent reflection and its associated overhead - and allocation costs, and encode/decode directly. These types are: - []interface{} - []int - []string - map[interface{}]interface{} - map[int]interface{} - map[string]interface{} - -Extension Support - -Users can register a function to handle the encoding or decoding of -their custom types. - -There are no restrictions on what the custom type can be. Some examples: - - type BisSet []int - type BitSet64 uint64 - type UUID string - type MyStructWithUnexportedFields struct { a int; b bool; c []int; } - type GifImage struct { ... } - -As an illustration, MyStructWithUnexportedFields would normally be -encoded as an empty map because it has no exported fields, while UUID -would be encoded as a string. However, with extension support, you can -encode any of these however you like. - -RPC - -RPC Client and Server Codecs are implemented, so the codecs can be used -with the standard net/rpc package. - -Usage - -Typical usage model: - - // create and configure Handle - var ( - bh codec.BincHandle - mh codec.MsgpackHandle - ) - - mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) - - // configure extensions - // e.g. for msgpack, define functions and enable Time support for tag 1 - // mh.AddExt(reflect.TypeOf(time.Time{}), 1, myMsgpackTimeEncodeExtFn, myMsgpackTimeDecodeExtFn) - - // create and use decoder/encoder - var ( - r io.Reader - w io.Writer - b []byte - h = &bh // or mh to use msgpack - ) - - dec = codec.NewDecoder(r, h) - dec = codec.NewDecoderBytes(b, h) - err = dec.Decode(&v) - - enc = codec.NewEncoder(w, h) - enc = codec.NewEncoderBytes(&b, h) - err = enc.Encode(v) - - //RPC Server - go func() { - for { - conn, err := listener.Accept() - rpcCodec := codec.GoRpc.ServerCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) - rpc.ServeCodec(rpcCodec) - } - }() - - //RPC Communication (client side) - conn, err = net.Dial("tcp", "localhost:5555") - rpcCodec := codec.GoRpc.ClientCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) - client := rpc.NewClientWithCodec(rpcCodec) - -Representative Benchmark Results - -Run the benchmark suite using: - go test -bi -bench=. 
-benchmem - -To run full benchmark suite (including against vmsgpack and bson), -see notes in ext_dep_test.go - -*/ -package codec diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/README.md b/vendor/github.com/hashicorp/go-msgpack/codec/README.md deleted file mode 100644 index 6c95d1bfd2081..0000000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/README.md +++ /dev/null @@ -1,174 +0,0 @@ -# Codec - -High Performance and Feature-Rich Idiomatic Go Library providing -encode/decode support for different serialization formats. - -Supported Serialization formats are: - - - msgpack: [https://github.com/msgpack/msgpack] - - binc: [http://github.com/ugorji/binc] - -To install: - - go get github.com/ugorji/go/codec - -Online documentation: [http://godoc.org/github.com/ugorji/go/codec] - -The idiomatic Go support is as seen in other encoding packages in -the standard library (ie json, xml, gob, etc). - -Rich Feature Set includes: - - - Simple but extremely powerful and feature-rich API - - Very High Performance. - Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X. - This was achieved by taking extreme care on: - - managing allocation - - function frame size (important due to Go's use of split stacks), - - reflection use (and by-passing reflection for common types) - - recursion implications - - zero-copy mode (encoding/decoding to byte slice without using temp buffers) - - Correct. - Care was taken to precisely handle corner cases like: - overflows, nil maps and slices, nil value in stream, etc. - - Efficient zero-copying into temporary byte buffers - when encoding into or decoding from a byte slice. - - Standard field renaming via tags - - Encoding from any value - (struct, slice, map, primitives, pointers, interface{}, etc) - - Decoding into pointer to any non-nil typed value - (struct, slice, map, int, float32, bool, string, reflect.Value, etc) - - Supports extension functions to handle the encode/decode of custom types - - Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler - - Schema-less decoding - (decode into a pointer to a nil interface{} as opposed to a typed non-nil value). - Includes Options to configure what specific map or slice type to use - when decoding an encoded list or map into a nil interface{} - - Provides a RPC Server and Client Codec for net/rpc communication protocol. - - Msgpack Specific: - - Provides extension functions to handle spec-defined extensions (binary, timestamp) - - Options to resolve ambiguities in handling raw bytes (as string or []byte) - during schema-less decoding (decoding into a nil interface{}) - - RPC Server/Client Codec for msgpack-rpc protocol defined at: - https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md - - Fast Paths for some container types: - For some container types, we circumvent reflection and its associated overhead - and allocation costs, and encode/decode directly. These types are: - []interface{} - []int - []string - map[interface{}]interface{} - map[int]interface{} - map[string]interface{} - -## Extension Support - -Users can register a function to handle the encoding or decoding of -their custom types. - -There are no restrictions on what the custom type can be. Some examples: - - type BisSet []int - type BitSet64 uint64 - type UUID string - type MyStructWithUnexportedFields struct { a int; b bool; c []int; } - type GifImage struct { ... 
} - -As an illustration, MyStructWithUnexportedFields would normally be -encoded as an empty map because it has no exported fields, while UUID -would be encoded as a string. However, with extension support, you can -encode any of these however you like. - -## RPC - -RPC Client and Server Codecs are implemented, so the codecs can be used -with the standard net/rpc package. - -## Usage - -Typical usage model: - - // create and configure Handle - var ( - bh codec.BincHandle - mh codec.MsgpackHandle - ) - - mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) - - // configure extensions - // e.g. for msgpack, define functions and enable Time support for tag 1 - // mh.AddExt(reflect.TypeOf(time.Time{}), 1, myMsgpackTimeEncodeExtFn, myMsgpackTimeDecodeExtFn) - - // create and use decoder/encoder - var ( - r io.Reader - w io.Writer - b []byte - h = &bh // or mh to use msgpack - ) - - dec = codec.NewDecoder(r, h) - dec = codec.NewDecoderBytes(b, h) - err = dec.Decode(&v) - - enc = codec.NewEncoder(w, h) - enc = codec.NewEncoderBytes(&b, h) - err = enc.Encode(v) - - //RPC Server - go func() { - for { - conn, err := listener.Accept() - rpcCodec := codec.GoRpc.ServerCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) - rpc.ServeCodec(rpcCodec) - } - }() - - //RPC Communication (client side) - conn, err = net.Dial("tcp", "localhost:5555") - rpcCodec := codec.GoRpc.ClientCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) - client := rpc.NewClientWithCodec(rpcCodec) - -## Representative Benchmark Results - -A sample run of benchmark using "go test -bi -bench=. -benchmem": - - /proc/cpuinfo: Intel(R) Core(TM) i7-2630QM CPU @ 2.00GHz (HT) - - .............................................. - BENCHMARK INIT: 2013-10-16 11:02:50.345970786 -0400 EDT - To run full benchmark comparing encodings (MsgPack, Binc, JSON, GOB, etc), use: "go test -bench=." - Benchmark: - Struct recursive Depth: 1 - ApproxDeepSize Of benchmark Struct: 4694 bytes - Benchmark One-Pass Run: - v-msgpack: len: 1600 bytes - bson: len: 3025 bytes - msgpack: len: 1560 bytes - binc: len: 1187 bytes - gob: len: 1972 bytes - json: len: 2538 bytes - .............................................. 
- PASS - Benchmark__Msgpack____Encode 50000 54359 ns/op 14953 B/op 83 allocs/op - Benchmark__Msgpack____Decode 10000 106531 ns/op 14990 B/op 410 allocs/op - Benchmark__Binc_NoSym_Encode 50000 53956 ns/op 14966 B/op 83 allocs/op - Benchmark__Binc_NoSym_Decode 10000 103751 ns/op 14529 B/op 386 allocs/op - Benchmark__Binc_Sym___Encode 50000 65961 ns/op 17130 B/op 88 allocs/op - Benchmark__Binc_Sym___Decode 10000 106310 ns/op 15857 B/op 287 allocs/op - Benchmark__Gob________Encode 10000 135944 ns/op 21189 B/op 237 allocs/op - Benchmark__Gob________Decode 5000 405390 ns/op 83460 B/op 1841 allocs/op - Benchmark__Json_______Encode 20000 79412 ns/op 13874 B/op 102 allocs/op - Benchmark__Json_______Decode 10000 247979 ns/op 14202 B/op 493 allocs/op - Benchmark__Bson_______Encode 10000 121762 ns/op 27814 B/op 514 allocs/op - Benchmark__Bson_______Decode 10000 162126 ns/op 16514 B/op 789 allocs/op - Benchmark__VMsgpack___Encode 50000 69155 ns/op 12370 B/op 344 allocs/op - Benchmark__VMsgpack___Decode 10000 151609 ns/op 20307 B/op 571 allocs/op - ok ugorji.net/codec 30.827s - -To run full benchmark suite (including against vmsgpack and bson), -see notes in ext\_dep\_test.go - diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/binc.go b/vendor/github.com/hashicorp/go-msgpack/codec/binc.go deleted file mode 100644 index 2bb5e8fee8548..0000000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/binc.go +++ /dev/null @@ -1,786 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -import ( - "math" - // "reflect" - // "sync/atomic" - "time" - //"fmt" -) - -const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning. 
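// The constants below pack a value descriptor (vd) into the high nibble of
// the descriptor byte and a value specifier (vs) into the low nibble. As an
// illustrative sketch (the helper names are assumptions, mirroring the
// bd>>4 and bd&0x0f reads in bincDecDriver.initReadNext further down):
//
//	func packBd(vd, vs byte) byte       { return vd<<4 | vs&0x0f }
//	func unpackBd(bd byte) (byte, byte) { return bd >> 4, bd & 0x0f }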
- -//var _ = fmt.Printf - -// vd as low 4 bits (there are 16 slots) -const ( - bincVdSpecial byte = iota - bincVdPosInt - bincVdNegInt - bincVdFloat - - bincVdString - bincVdByteArray - bincVdArray - bincVdMap - - bincVdTimestamp - bincVdSmallInt - bincVdUnicodeOther - bincVdSymbol - - bincVdDecimal - _ // open slot - _ // open slot - bincVdCustomExt = 0x0f -) - -const ( - bincSpNil byte = iota - bincSpFalse - bincSpTrue - bincSpNan - bincSpPosInf - bincSpNegInf - bincSpZeroFloat - bincSpZero - bincSpNegOne -) - -const ( - bincFlBin16 byte = iota - bincFlBin32 - _ // bincFlBin32e - bincFlBin64 - _ // bincFlBin64e - // others not currently supported -) - -type bincEncDriver struct { - w encWriter - m map[string]uint16 // symbols - s uint32 // symbols sequencer - b [8]byte -} - -func (e *bincEncDriver) isBuiltinType(rt uintptr) bool { - return rt == timeTypId -} - -func (e *bincEncDriver) encodeBuiltin(rt uintptr, v interface{}) { - switch rt { - case timeTypId: - bs := encodeTime(v.(time.Time)) - e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs))) - e.w.writeb(bs) - } -} - -func (e *bincEncDriver) encodeNil() { - e.w.writen1(bincVdSpecial<<4 | bincSpNil) -} - -func (e *bincEncDriver) encodeBool(b bool) { - if b { - e.w.writen1(bincVdSpecial<<4 | bincSpTrue) - } else { - e.w.writen1(bincVdSpecial<<4 | bincSpFalse) - } -} - -func (e *bincEncDriver) encodeFloat32(f float32) { - if f == 0 { - e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) - return - } - e.w.writen1(bincVdFloat<<4 | bincFlBin32) - e.w.writeUint32(math.Float32bits(f)) -} - -func (e *bincEncDriver) encodeFloat64(f float64) { - if f == 0 { - e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) - return - } - bigen.PutUint64(e.b[:], math.Float64bits(f)) - if bincDoPrune { - i := 7 - for ; i >= 0 && (e.b[i] == 0); i-- { - } - i++ - if i <= 6 { - e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64) - e.w.writen1(byte(i)) - e.w.writeb(e.b[:i]) - return - } - } - e.w.writen1(bincVdFloat<<4 | bincFlBin64) - e.w.writeb(e.b[:]) -} - -func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) { - if lim == 4 { - bigen.PutUint32(e.b[:lim], uint32(v)) - } else { - bigen.PutUint64(e.b[:lim], v) - } - if bincDoPrune { - i := pruneSignExt(e.b[:lim], pos) - e.w.writen1(bd | lim - 1 - byte(i)) - e.w.writeb(e.b[i:lim]) - } else { - e.w.writen1(bd | lim - 1) - e.w.writeb(e.b[:lim]) - } -} - -func (e *bincEncDriver) encodeInt(v int64) { - const nbd byte = bincVdNegInt << 4 - switch { - case v >= 0: - e.encUint(bincVdPosInt<<4, true, uint64(v)) - case v == -1: - e.w.writen1(bincVdSpecial<<4 | bincSpNegOne) - default: - e.encUint(bincVdNegInt<<4, false, uint64(-v)) - } -} - -func (e *bincEncDriver) encodeUint(v uint64) { - e.encUint(bincVdPosInt<<4, true, v) -} - -func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) { - switch { - case v == 0: - e.w.writen1(bincVdSpecial<<4 | bincSpZero) - case pos && v >= 1 && v <= 16: - e.w.writen1(bincVdSmallInt<<4 | byte(v-1)) - case v <= math.MaxUint8: - e.w.writen2(bd|0x0, byte(v)) - case v <= math.MaxUint16: - e.w.writen1(bd | 0x01) - e.w.writeUint16(uint16(v)) - case v <= math.MaxUint32: - e.encIntegerPrune(bd, pos, v, 4) - default: - e.encIntegerPrune(bd, pos, v, 8) - } -} - -func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) { - e.encLen(bincVdCustomExt<<4, uint64(length)) - e.w.writen1(xtag) -} - -func (e *bincEncDriver) encodeArrayPreamble(length int) { - e.encLen(bincVdArray<<4, uint64(length)) -} - -func (e *bincEncDriver) encodeMapPreamble(length int) { - 
e.encLen(bincVdMap<<4, uint64(length)) -} - -func (e *bincEncDriver) encodeString(c charEncoding, v string) { - l := uint64(len(v)) - e.encBytesLen(c, l) - if l > 0 { - e.w.writestr(v) - } -} - -func (e *bincEncDriver) encodeSymbol(v string) { - // if WriteSymbolsNoRefs { - // e.encodeString(c_UTF8, v) - // return - // } - - //symbols only offer benefit when string length > 1. - //This is because strings with length 1 take only 2 bytes to store - //(bd with embedded length, and single byte for string val). - - l := len(v) - switch l { - case 0: - e.encBytesLen(c_UTF8, 0) - return - case 1: - e.encBytesLen(c_UTF8, 1) - e.w.writen1(v[0]) - return - } - if e.m == nil { - e.m = make(map[string]uint16, 16) - } - ui, ok := e.m[v] - if ok { - if ui <= math.MaxUint8 { - e.w.writen2(bincVdSymbol<<4, byte(ui)) - } else { - e.w.writen1(bincVdSymbol<<4 | 0x8) - e.w.writeUint16(ui) - } - } else { - e.s++ - ui = uint16(e.s) - //ui = uint16(atomic.AddUint32(&e.s, 1)) - e.m[v] = ui - var lenprec uint8 - switch { - case l <= math.MaxUint8: - // lenprec = 0 - case l <= math.MaxUint16: - lenprec = 1 - case int64(l) <= math.MaxUint32: - lenprec = 2 - default: - lenprec = 3 - } - if ui <= math.MaxUint8 { - e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui)) - } else { - e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec) - e.w.writeUint16(ui) - } - switch lenprec { - case 0: - e.w.writen1(byte(l)) - case 1: - e.w.writeUint16(uint16(l)) - case 2: - e.w.writeUint32(uint32(l)) - default: - e.w.writeUint64(uint64(l)) - } - e.w.writestr(v) - } -} - -func (e *bincEncDriver) encodeStringBytes(c charEncoding, v []byte) { - l := uint64(len(v)) - e.encBytesLen(c, l) - if l > 0 { - e.w.writeb(v) - } -} - -func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) { - //TODO: support bincUnicodeOther (for now, just use string or bytearray) - if c == c_RAW { - e.encLen(bincVdByteArray<<4, length) - } else { - e.encLen(bincVdString<<4, length) - } -} - -func (e *bincEncDriver) encLen(bd byte, l uint64) { - if l < 12 { - e.w.writen1(bd | uint8(l+4)) - } else { - e.encLenNumber(bd, l) - } -} - -func (e *bincEncDriver) encLenNumber(bd byte, v uint64) { - switch { - case v <= math.MaxUint8: - e.w.writen2(bd, byte(v)) - case v <= math.MaxUint16: - e.w.writen1(bd | 0x01) - e.w.writeUint16(uint16(v)) - case v <= math.MaxUint32: - e.w.writen1(bd | 0x02) - e.w.writeUint32(uint32(v)) - default: - e.w.writen1(bd | 0x03) - e.w.writeUint64(uint64(v)) - } -} - -//------------------------------------ - -type bincDecDriver struct { - r decReader - bdRead bool - bdType valueType - bd byte - vd byte - vs byte - b [8]byte - m map[uint32]string // symbols (use uint32 as key, as map optimizes for it) -} - -func (d *bincDecDriver) initReadNext() { - if d.bdRead { - return - } - d.bd = d.r.readn1() - d.vd = d.bd >> 4 - d.vs = d.bd & 0x0f - d.bdRead = true - d.bdType = valueTypeUnset -} - -func (d *bincDecDriver) currentEncodedType() valueType { - if d.bdType == valueTypeUnset { - switch d.vd { - case bincVdSpecial: - switch d.vs { - case bincSpNil: - d.bdType = valueTypeNil - case bincSpFalse, bincSpTrue: - d.bdType = valueTypeBool - case bincSpNan, bincSpNegInf, bincSpPosInf, bincSpZeroFloat: - d.bdType = valueTypeFloat - case bincSpZero: - d.bdType = valueTypeUint - case bincSpNegOne: - d.bdType = valueTypeInt - default: - decErr("currentEncodedType: Unrecognized special value 0x%x", d.vs) - } - case bincVdSmallInt: - d.bdType = valueTypeUint - case bincVdPosInt: - d.bdType = valueTypeUint - case bincVdNegInt: - d.bdType = 
valueTypeInt - case bincVdFloat: - d.bdType = valueTypeFloat - case bincVdString: - d.bdType = valueTypeString - case bincVdSymbol: - d.bdType = valueTypeSymbol - case bincVdByteArray: - d.bdType = valueTypeBytes - case bincVdTimestamp: - d.bdType = valueTypeTimestamp - case bincVdCustomExt: - d.bdType = valueTypeExt - case bincVdArray: - d.bdType = valueTypeArray - case bincVdMap: - d.bdType = valueTypeMap - default: - decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.vd) - } - } - return d.bdType -} - -func (d *bincDecDriver) tryDecodeAsNil() bool { - if d.bd == bincVdSpecial<<4|bincSpNil { - d.bdRead = false - return true - } - return false -} - -func (d *bincDecDriver) isBuiltinType(rt uintptr) bool { - return rt == timeTypId -} - -func (d *bincDecDriver) decodeBuiltin(rt uintptr, v interface{}) { - switch rt { - case timeTypId: - if d.vd != bincVdTimestamp { - decErr("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd) - } - tt, err := decodeTime(d.r.readn(int(d.vs))) - if err != nil { - panic(err) - } - var vt *time.Time = v.(*time.Time) - *vt = tt - d.bdRead = false - } -} - -func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { - if vs&0x8 == 0 { - d.r.readb(d.b[0:defaultLen]) - } else { - l := d.r.readn1() - if l > 8 { - decErr("At most 8 bytes used to represent float. Received: %v bytes", l) - } - for i := l; i < 8; i++ { - d.b[i] = 0 - } - d.r.readb(d.b[0:l]) - } -} - -func (d *bincDecDriver) decFloat() (f float64) { - //if true { f = math.Float64frombits(d.r.readUint64()); break; } - switch vs := d.vs; vs & 0x7 { - case bincFlBin32: - d.decFloatPre(vs, 4) - f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4]))) - case bincFlBin64: - d.decFloatPre(vs, 8) - f = math.Float64frombits(bigen.Uint64(d.b[0:8])) - default: - decErr("only float32 and float64 are supported. d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs) - } - return -} - -func (d *bincDecDriver) decUint() (v uint64) { - // need to inline the code (interface conversion and type assertion expensive) - switch d.vs { - case 0: - v = uint64(d.r.readn1()) - case 1: - d.r.readb(d.b[6:]) - v = uint64(bigen.Uint16(d.b[6:])) - case 2: - d.b[4] = 0 - d.r.readb(d.b[5:]) - v = uint64(bigen.Uint32(d.b[4:])) - case 3: - d.r.readb(d.b[4:]) - v = uint64(bigen.Uint32(d.b[4:])) - case 4, 5, 6: - lim := int(7 - d.vs) - d.r.readb(d.b[lim:]) - for i := 0; i < lim; i++ { - d.b[i] = 0 - } - v = uint64(bigen.Uint64(d.b[:])) - case 7: - d.r.readb(d.b[:]) - v = uint64(bigen.Uint64(d.b[:])) - default: - decErr("unsigned integers with greater than 64 bits of precision not supported") - } - return -} - -func (d *bincDecDriver) decIntAny() (ui uint64, i int64, neg bool) { - switch d.vd { - case bincVdPosInt: - ui = d.decUint() - i = int64(ui) - case bincVdNegInt: - ui = d.decUint() - i = -(int64(ui)) - neg = true - case bincVdSmallInt: - i = int64(d.vs) + 1 - ui = uint64(d.vs) + 1 - case bincVdSpecial: - switch d.vs { - case bincSpZero: - //i = 0 - case bincSpNegOne: - neg = true - ui = 1 - i = -1 - default: - decErr("numeric decode fails for special value: d.vs: 0x%x", d.vs) - } - default: - decErr("number can only be decoded from uint or int values. 
d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) - } - return -} - -func (d *bincDecDriver) decodeInt(bitsize uint8) (i int64) { - _, i, _ = d.decIntAny() - checkOverflow(0, i, bitsize) - d.bdRead = false - return -} - -func (d *bincDecDriver) decodeUint(bitsize uint8) (ui uint64) { - ui, i, neg := d.decIntAny() - if neg { - decErr("Assigning negative signed value: %v, to unsigned type", i) - } - checkOverflow(ui, 0, bitsize) - d.bdRead = false - return -} - -func (d *bincDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { - switch d.vd { - case bincVdSpecial: - d.bdRead = false - switch d.vs { - case bincSpNan: - return math.NaN() - case bincSpPosInf: - return math.Inf(1) - case bincSpZeroFloat, bincSpZero: - return - case bincSpNegInf: - return math.Inf(-1) - default: - decErr("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs) - } - case bincVdFloat: - f = d.decFloat() - default: - _, i, _ := d.decIntAny() - f = float64(i) - } - checkOverflowFloat32(f, chkOverflow32) - d.bdRead = false - return -} - -// bool can be decoded from bool only (single byte). -func (d *bincDecDriver) decodeBool() (b bool) { - switch d.bd { - case (bincVdSpecial | bincSpFalse): - // b = false - case (bincVdSpecial | bincSpTrue): - b = true - default: - decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) - } - d.bdRead = false - return -} - -func (d *bincDecDriver) readMapLen() (length int) { - if d.vd != bincVdMap { - decErr("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd) - } - length = d.decLen() - d.bdRead = false - return -} - -func (d *bincDecDriver) readArrayLen() (length int) { - if d.vd != bincVdArray { - decErr("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd) - } - length = d.decLen() - d.bdRead = false - return -} - -func (d *bincDecDriver) decLen() int { - if d.vs <= 3 { - return int(d.decUint()) - } - return int(d.vs - 4) -} - -func (d *bincDecDriver) decodeString() (s string) { - switch d.vd { - case bincVdString, bincVdByteArray: - if length := d.decLen(); length > 0 { - s = string(d.r.readn(length)) - } - case bincVdSymbol: - //from vs: extract numSymbolBytes, containsStringVal, strLenPrecision, - //extract symbol - //if containsStringVal, read it and put in map - //else look in map for string value - var symbol uint32 - vs := d.vs - //fmt.Printf(">>>> d.vs: 0b%b, & 0x8: %v, & 0x4: %v\n", d.vs, vs & 0x8, vs & 0x4) - if vs&0x8 == 0 { - symbol = uint32(d.r.readn1()) - } else { - symbol = uint32(d.r.readUint16()) - } - if d.m == nil { - d.m = make(map[uint32]string, 16) - } - - if vs&0x4 == 0 { - s = d.m[symbol] - } else { - var slen int - switch vs & 0x3 { - case 0: - slen = int(d.r.readn1()) - case 1: - slen = int(d.r.readUint16()) - case 2: - slen = int(d.r.readUint32()) - case 3: - slen = int(d.r.readUint64()) - } - s = string(d.r.readn(slen)) - d.m[symbol] = s - } - default: - decErr("Invalid d.vd for string. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x", - bincVdString, bincVdByteArray, bincVdSymbol, d.vd) - } - d.bdRead = false - return -} - -func (d *bincDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { - var clen int - switch d.vd { - case bincVdString, bincVdByteArray: - clen = d.decLen() - default: - decErr("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. 
Got: 0x%x", - bincVdString, bincVdByteArray, d.vd) - } - if clen > 0 { - // if no contents in stream, don't update the passed byteslice - if len(bs) != clen { - if len(bs) > clen { - bs = bs[:clen] - } else { - bs = make([]byte, clen) - } - bsOut = bs - changed = true - } - d.r.readb(bs) - } - d.bdRead = false - return -} - -func (d *bincDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - switch d.vd { - case bincVdCustomExt: - l := d.decLen() - xtag = d.r.readn1() - if verifyTag && xtag != tag { - decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) - } - xbs = d.r.readn(l) - case bincVdByteArray: - xbs, _ = d.decodeBytes(nil) - default: - decErr("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.vd) - } - d.bdRead = false - return -} - -func (d *bincDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { - d.initReadNext() - - switch d.vd { - case bincVdSpecial: - switch d.vs { - case bincSpNil: - vt = valueTypeNil - case bincSpFalse: - vt = valueTypeBool - v = false - case bincSpTrue: - vt = valueTypeBool - v = true - case bincSpNan: - vt = valueTypeFloat - v = math.NaN() - case bincSpPosInf: - vt = valueTypeFloat - v = math.Inf(1) - case bincSpNegInf: - vt = valueTypeFloat - v = math.Inf(-1) - case bincSpZeroFloat: - vt = valueTypeFloat - v = float64(0) - case bincSpZero: - vt = valueTypeUint - v = int64(0) // int8(0) - case bincSpNegOne: - vt = valueTypeInt - v = int64(-1) // int8(-1) - default: - decErr("decodeNaked: Unrecognized special value 0x%x", d.vs) - } - case bincVdSmallInt: - vt = valueTypeUint - v = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1 - case bincVdPosInt: - vt = valueTypeUint - v = d.decUint() - case bincVdNegInt: - vt = valueTypeInt - v = -(int64(d.decUint())) - case bincVdFloat: - vt = valueTypeFloat - v = d.decFloat() - case bincVdSymbol: - vt = valueTypeSymbol - v = d.decodeString() - case bincVdString: - vt = valueTypeString - v = d.decodeString() - case bincVdByteArray: - vt = valueTypeBytes - v, _ = d.decodeBytes(nil) - case bincVdTimestamp: - vt = valueTypeTimestamp - tt, err := decodeTime(d.r.readn(int(d.vs))) - if err != nil { - panic(err) - } - v = tt - case bincVdCustomExt: - vt = valueTypeExt - l := d.decLen() - var re RawExt - re.Tag = d.r.readn1() - re.Data = d.r.readn(l) - v = &re - vt = valueTypeExt - case bincVdArray: - vt = valueTypeArray - decodeFurther = true - case bincVdMap: - vt = valueTypeMap - decodeFurther = true - default: - decErr("decodeNaked: Unrecognized d.vd: 0x%x", d.vd) - } - - if !decodeFurther { - d.bdRead = false - } - return -} - -//------------------------------------ - -//BincHandle is a Handle for the Binc Schema-Free Encoding Format -//defined at https://github.com/ugorji/binc . -// -//BincHandle currently supports all Binc features with the following EXCEPTIONS: -// - only integers up to 64 bits of precision are supported. -// big integers are unsupported. -// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types). -// extended precision and decimal IEEE 754 floats are unsupported. -// - Only UTF-8 strings supported. -// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported. -//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon. 
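// A minimal round-trip with this handle, for context (an illustrative sketch
// using the package's own constructors; v and out are placeholders):
//
//	func bincRoundTrip(v, out interface{}) error {
//		var bh codec.BincHandle
//		var b []byte
//		if err := codec.NewEncoderBytes(&b, &bh).Encode(v); err != nil {
//			return err
//		}
//		return codec.NewDecoderBytes(b, &bh).Decode(out)
//	}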
-type BincHandle struct { - BasicHandle -} - -func (h *BincHandle) newEncDriver(w encWriter) encDriver { - return &bincEncDriver{w: w} -} - -func (h *BincHandle) newDecDriver(r decReader) decDriver { - return &bincDecDriver{r: r} -} - -func (_ *BincHandle) writeExt() bool { - return true -} - -func (h *BincHandle) getBasicHandle() *BasicHandle { - return &h.BasicHandle -} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/decode.go b/vendor/github.com/hashicorp/go-msgpack/codec/decode.go deleted file mode 100644 index 851b54ac7e776..0000000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/decode.go +++ /dev/null @@ -1,1048 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -import ( - "io" - "reflect" - // "runtime/debug" -) - -// Some tagging information for error messages. -const ( - msgTagDec = "codec.decoder" - msgBadDesc = "Unrecognized descriptor byte" - msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v" -) - -// decReader abstracts the reading source, allowing implementations that can -// read from an io.Reader or directly off a byte slice with zero-copying. -type decReader interface { - readn(n int) []byte - readb([]byte) - readn1() uint8 - readUint16() uint16 - readUint32() uint32 - readUint64() uint64 -} - -type decDriver interface { - initReadNext() - tryDecodeAsNil() bool - currentEncodedType() valueType - isBuiltinType(rt uintptr) bool - decodeBuiltin(rt uintptr, v interface{}) - //decodeNaked: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types). - decodeNaked() (v interface{}, vt valueType, decodeFurther bool) - decodeInt(bitsize uint8) (i int64) - decodeUint(bitsize uint8) (ui uint64) - decodeFloat(chkOverflow32 bool) (f float64) - decodeBool() (b bool) - // decodeString can also decode symbols - decodeString() (s string) - decodeBytes(bs []byte) (bsOut []byte, changed bool) - decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) - readMapLen() int - readArrayLen() int -} - -type DecodeOptions struct { - // An instance of MapType is used during schema-less decoding of a map in the stream. - // If nil, we use map[interface{}]interface{} - MapType reflect.Type - // An instance of SliceType is used during schema-less decoding of an array in the stream. - // If nil, we use []interface{} - SliceType reflect.Type - // ErrorIfNoField controls whether an error is returned when decoding a map - // from a codec stream into a struct, and no matching struct field is found. 
- ErrorIfNoField bool -} - -// ------------------------------------ - -// ioDecReader is a decReader that reads off an io.Reader -type ioDecReader struct { - r io.Reader - br io.ByteReader - x [8]byte //temp byte array re-used internally for efficiency -} - -func (z *ioDecReader) readn(n int) (bs []byte) { - if n <= 0 { - return - } - bs = make([]byte, n) - if _, err := io.ReadAtLeast(z.r, bs, n); err != nil { - panic(err) - } - return -} - -func (z *ioDecReader) readb(bs []byte) { - if _, err := io.ReadAtLeast(z.r, bs, len(bs)); err != nil { - panic(err) - } -} - -func (z *ioDecReader) readn1() uint8 { - if z.br != nil { - b, err := z.br.ReadByte() - if err != nil { - panic(err) - } - return b - } - z.readb(z.x[:1]) - return z.x[0] -} - -func (z *ioDecReader) readUint16() uint16 { - z.readb(z.x[:2]) - return bigen.Uint16(z.x[:2]) -} - -func (z *ioDecReader) readUint32() uint32 { - z.readb(z.x[:4]) - return bigen.Uint32(z.x[:4]) -} - -func (z *ioDecReader) readUint64() uint64 { - z.readb(z.x[:8]) - return bigen.Uint64(z.x[:8]) -} - -// ------------------------------------ - -// bytesDecReader is a decReader that reads off a byte slice with zero copying -type bytesDecReader struct { - b []byte // data - c int // cursor - a int // available -} - -func (z *bytesDecReader) consume(n int) (oldcursor int) { - if z.a == 0 { - panic(io.EOF) - } - if n > z.a { - decErr("Trying to read %v bytes. Only %v available", n, z.a) - } - // z.checkAvailable(n) - oldcursor = z.c - z.c = oldcursor + n - z.a = z.a - n - return -} - -func (z *bytesDecReader) readn(n int) (bs []byte) { - if n <= 0 { - return - } - c0 := z.consume(n) - bs = z.b[c0:z.c] - return -} - -func (z *bytesDecReader) readb(bs []byte) { - copy(bs, z.readn(len(bs))) -} - -func (z *bytesDecReader) readn1() uint8 { - c0 := z.consume(1) - return z.b[c0] -} - -// Use the binaryEncoding helper for 4 and 8 bytes, but inline it for 2 bytes: - // creating a temp slice variable and copying it to the helper function is expensive - // for just 2 bytes. 
- -func (z *bytesDecReader) readUint16() uint16 { - c0 := z.consume(2) - return uint16(z.b[c0+1]) | uint16(z.b[c0])<<8 -} - -func (z *bytesDecReader) readUint32() uint32 { - c0 := z.consume(4) - return bigen.Uint32(z.b[c0:z.c]) -} - -func (z *bytesDecReader) readUint64() uint64 { - c0 := z.consume(8) - return bigen.Uint64(z.b[c0:z.c]) -} - -// ------------------------------------ - -// decFnInfo has methods for registering handling decoding of a specific type -// based on some characteristics (builtin, extension, reflect Kind, etc) -type decFnInfo struct { - ti *typeInfo - d *Decoder - dd decDriver - xfFn func(reflect.Value, []byte) error - xfTag byte - array bool -} - -func (f *decFnInfo) builtin(rv reflect.Value) { - f.dd.decodeBuiltin(f.ti.rtid, rv.Addr().Interface()) -} - -func (f *decFnInfo) rawExt(rv reflect.Value) { - xtag, xbs := f.dd.decodeExt(false, 0) - rv.Field(0).SetUint(uint64(xtag)) - rv.Field(1).SetBytes(xbs) -} - -func (f *decFnInfo) ext(rv reflect.Value) { - _, xbs := f.dd.decodeExt(true, f.xfTag) - if fnerr := f.xfFn(rv, xbs); fnerr != nil { - panic(fnerr) - } -} - -func (f *decFnInfo) binaryMarshal(rv reflect.Value) { - var bm binaryUnmarshaler - if f.ti.unmIndir == -1 { - bm = rv.Addr().Interface().(binaryUnmarshaler) - } else if f.ti.unmIndir == 0 { - bm = rv.Interface().(binaryUnmarshaler) - } else { - for j, k := int8(0), f.ti.unmIndir; j < k; j++ { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - rv = rv.Elem() - } - bm = rv.Interface().(binaryUnmarshaler) - } - xbs, _ := f.dd.decodeBytes(nil) - if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil { - panic(fnerr) - } -} - -func (f *decFnInfo) kErr(rv reflect.Value) { - decErr("Unhandled value for kind: %v: %s", rv.Kind(), msgBadDesc) -} - -func (f *decFnInfo) kString(rv reflect.Value) { - rv.SetString(f.dd.decodeString()) -} - -func (f *decFnInfo) kBool(rv reflect.Value) { - rv.SetBool(f.dd.decodeBool()) -} - -func (f *decFnInfo) kInt(rv reflect.Value) { - rv.SetInt(f.dd.decodeInt(intBitsize)) -} - -func (f *decFnInfo) kInt64(rv reflect.Value) { - rv.SetInt(f.dd.decodeInt(64)) -} - -func (f *decFnInfo) kInt32(rv reflect.Value) { - rv.SetInt(f.dd.decodeInt(32)) -} - -func (f *decFnInfo) kInt8(rv reflect.Value) { - rv.SetInt(f.dd.decodeInt(8)) -} - -func (f *decFnInfo) kInt16(rv reflect.Value) { - rv.SetInt(f.dd.decodeInt(16)) -} - -func (f *decFnInfo) kFloat32(rv reflect.Value) { - rv.SetFloat(f.dd.decodeFloat(true)) -} - -func (f *decFnInfo) kFloat64(rv reflect.Value) { - rv.SetFloat(f.dd.decodeFloat(false)) -} - -func (f *decFnInfo) kUint8(rv reflect.Value) { - rv.SetUint(f.dd.decodeUint(8)) -} - -func (f *decFnInfo) kUint64(rv reflect.Value) { - rv.SetUint(f.dd.decodeUint(64)) -} - -func (f *decFnInfo) kUint(rv reflect.Value) { - rv.SetUint(f.dd.decodeUint(uintBitsize)) -} - -func (f *decFnInfo) kUint32(rv reflect.Value) { - rv.SetUint(f.dd.decodeUint(32)) -} - -func (f *decFnInfo) kUint16(rv reflect.Value) { - rv.SetUint(f.dd.decodeUint(16)) -} - -// func (f *decFnInfo) kPtr(rv reflect.Value) { -// debugf(">>>>>>> ??? 
decode kPtr called - shouldn't get called") -// if rv.IsNil() { -// rv.Set(reflect.New(rv.Type().Elem())) -// } -// f.d.decodeValue(rv.Elem()) -// } - -func (f *decFnInfo) kInterface(rv reflect.Value) { - // debugf("\t===> kInterface") - if !rv.IsNil() { - f.d.decodeValue(rv.Elem()) - return - } - // nil interface: - // use some heuristics to set the nil interface to an - // appropriate value based on the first byte read (byte descriptor bd) - v, vt, decodeFurther := f.dd.decodeNaked() - if vt == valueTypeNil { - return - } - // Cannot decode into nil interface with methods (e.g. error, io.Reader, etc) - // if non-nil value in stream. - if num := f.ti.rt.NumMethod(); num > 0 { - decErr("decodeValue: Cannot decode non-nil codec value into nil %v (%v methods)", - f.ti.rt, num) - } - var rvn reflect.Value - var useRvn bool - switch vt { - case valueTypeMap: - if f.d.h.MapType == nil { - var m2 map[interface{}]interface{} - v = &m2 - } else { - rvn = reflect.New(f.d.h.MapType).Elem() - useRvn = true - } - case valueTypeArray: - if f.d.h.SliceType == nil { - var m2 []interface{} - v = &m2 - } else { - rvn = reflect.New(f.d.h.SliceType).Elem() - useRvn = true - } - case valueTypeExt: - re := v.(*RawExt) - var bfn func(reflect.Value, []byte) error - rvn, bfn = f.d.h.getDecodeExtForTag(re.Tag) - if bfn == nil { - rvn = reflect.ValueOf(*re) - } else if fnerr := bfn(rvn, re.Data); fnerr != nil { - panic(fnerr) - } - rv.Set(rvn) - return - } - if decodeFurther { - if useRvn { - f.d.decodeValue(rvn) - } else if v != nil { - // this v is a pointer, so we need to dereference it when done - f.d.decode(v) - rvn = reflect.ValueOf(v).Elem() - useRvn = true - } - } - if useRvn { - rv.Set(rvn) - } else if v != nil { - rv.Set(reflect.ValueOf(v)) - } -} - -func (f *decFnInfo) kStruct(rv reflect.Value) { - fti := f.ti - if currEncodedType := f.dd.currentEncodedType(); currEncodedType == valueTypeMap { - containerLen := f.dd.readMapLen() - if containerLen == 0 { - return - } - tisfi := fti.sfi - for j := 0; j < containerLen; j++ { - // var rvkencname string - // ddecode(&rvkencname) - f.dd.initReadNext() - rvkencname := f.dd.decodeString() - // rvksi := ti.getForEncName(rvkencname) - if k := fti.indexForEncName(rvkencname); k > -1 { - sfik := tisfi[k] - if sfik.i != -1 { - f.d.decodeValue(rv.Field(int(sfik.i))) - } else { - f.d.decEmbeddedField(rv, sfik.is) - } - // f.d.decodeValue(ti.field(k, rv)) - } else { - if f.d.h.ErrorIfNoField { - decErr("No matching struct field found when decoding stream map with key: %v", - rvkencname) - } else { - var nilintf0 interface{} - f.d.decodeValue(reflect.ValueOf(&nilintf0).Elem()) - } - } - } - } else if currEncodedType == valueTypeArray { - containerLen := f.dd.readArrayLen() - if containerLen == 0 { - return - } - for j, si := range fti.sfip { - if j == containerLen { - break - } - if si.i != -1 { - f.d.decodeValue(rv.Field(int(si.i))) - } else { - f.d.decEmbeddedField(rv, si.is) - } - } - if containerLen > len(fti.sfip) { - // read remaining values and throw away - for j := len(fti.sfip); j < containerLen; j++ { - var nilintf0 interface{} - f.d.decodeValue(reflect.ValueOf(&nilintf0).Elem()) - } - } - } else { - decErr("Only encoded map or array can be decoded into a struct. (valueType: %x)", - currEncodedType) - } -} - -func (f *decFnInfo) kSlice(rv reflect.Value) { - // A slice can be set from a map or array in stream. 
- currEncodedType := f.dd.currentEncodedType() - - switch currEncodedType { - case valueTypeBytes, valueTypeString: - if f.ti.rtid == uint8SliceTypId || f.ti.rt.Elem().Kind() == reflect.Uint8 { - if bs2, changed2 := f.dd.decodeBytes(rv.Bytes()); changed2 { - rv.SetBytes(bs2) - } - return - } - } - - if shortCircuitReflectToFastPath && rv.CanAddr() { - switch f.ti.rtid { - case intfSliceTypId: - f.d.decSliceIntf(rv.Addr().Interface().(*[]interface{}), currEncodedType, f.array) - return - case uint64SliceTypId: - f.d.decSliceUint64(rv.Addr().Interface().(*[]uint64), currEncodedType, f.array) - return - case int64SliceTypId: - f.d.decSliceInt64(rv.Addr().Interface().(*[]int64), currEncodedType, f.array) - return - case strSliceTypId: - f.d.decSliceStr(rv.Addr().Interface().(*[]string), currEncodedType, f.array) - return - } - } - - containerLen, containerLenS := decContLens(f.dd, currEncodedType) - - // An array can never return a nil slice, so no need to check f.array here. - - if rv.IsNil() { - rv.Set(reflect.MakeSlice(f.ti.rt, containerLenS, containerLenS)) - } - - if containerLen == 0 { - return - } - - if rvlen, rvcap := rv.Len(), rv.Cap(); containerLenS > rvcap { - if f.array { // !rv.CanSet() - decErr(msgDecCannotExpandArr, rvcap, containerLenS) - } - rvn := reflect.MakeSlice(f.ti.rt, containerLenS, containerLenS) - if rvlen > 0 { - reflect.Copy(rvn, rv) - } - rv.Set(rvn) - } else if containerLenS > rvlen { - rv.SetLen(containerLenS) - } - - for j := 0; j < containerLenS; j++ { - f.d.decodeValue(rv.Index(j)) - } -} - -func (f *decFnInfo) kArray(rv reflect.Value) { - // f.d.decodeValue(rv.Slice(0, rv.Len())) - f.kSlice(rv.Slice(0, rv.Len())) -} - -func (f *decFnInfo) kMap(rv reflect.Value) { - if shortCircuitReflectToFastPath && rv.CanAddr() { - switch f.ti.rtid { - case mapStrIntfTypId: - f.d.decMapStrIntf(rv.Addr().Interface().(*map[string]interface{})) - return - case mapIntfIntfTypId: - f.d.decMapIntfIntf(rv.Addr().Interface().(*map[interface{}]interface{})) - return - case mapInt64IntfTypId: - f.d.decMapInt64Intf(rv.Addr().Interface().(*map[int64]interface{})) - return - case mapUint64IntfTypId: - f.d.decMapUint64Intf(rv.Addr().Interface().(*map[uint64]interface{})) - return - } - } - - containerLen := f.dd.readMapLen() - - if rv.IsNil() { - rv.Set(reflect.MakeMap(f.ti.rt)) - } - - if containerLen == 0 { - return - } - - ktype, vtype := f.ti.rt.Key(), f.ti.rt.Elem() - ktypeId := reflect.ValueOf(ktype).Pointer() - for j := 0; j < containerLen; j++ { - rvk := reflect.New(ktype).Elem() - f.d.decodeValue(rvk) - - // special case if a byte array. - // if ktype == intfTyp { - if ktypeId == intfTypId { - rvk = rvk.Elem() - if rvk.Type() == uint8SliceTyp { - rvk = reflect.ValueOf(string(rvk.Bytes())) - } - } - rvv := rv.MapIndex(rvk) - if !rvv.IsValid() || !rvv.CanSet() { - rvv = reflect.New(vtype).Elem() - } - - f.d.decodeValue(rvv) - rv.SetMapIndex(rvk, rvv) - } -} - -// ---------------------------------------- - -type decFn struct { - i *decFnInfo - f func(*decFnInfo, reflect.Value) -} - -// A Decoder reads and decodes an object from an input stream in the codec format. -type Decoder struct { - r decReader - d decDriver - h *BasicHandle - f map[uintptr]decFn - x []uintptr - s []decFn -} - -// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader. -// -// For efficiency, users are encouraged to pass in a memory buffered reader -// (eg bufio.Reader, bytes.Buffer). 
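// For example (an illustrative sketch; conn stands for any unbuffered source
// such as a net.Conn):
//
//	dec := codec.NewDecoder(bufio.NewReader(conn), h)
//	err := dec.Decode(&v)
//
// Without buffering, each one-byte read (readn1) may otherwise hit the
// underlying reader directly.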
-func NewDecoder(r io.Reader, h Handle) *Decoder { - z := ioDecReader{ - r: r, - } - z.br, _ = r.(io.ByteReader) - return &Decoder{r: &z, d: h.newDecDriver(&z), h: h.getBasicHandle()} -} - -// NewDecoderBytes returns a Decoder which efficiently decodes directly -// from a byte slice with zero copying. -func NewDecoderBytes(in []byte, h Handle) *Decoder { - z := bytesDecReader{ - b: in, - a: len(in), - } - return &Decoder{r: &z, d: h.newDecDriver(&z), h: h.getBasicHandle()} -} - -// Decode decodes the stream from reader and stores the result in the -// value pointed to by v. v cannot be a nil pointer. v can also be -// a reflect.Value of a pointer. -// -// Note that a pointer to a nil interface is not a nil pointer. -// If you do not know what type of stream it is, pass in a pointer to a nil interface. -// We will decode and store a value in that nil interface. -// -// Sample usages: -// // Decoding into a non-nil typed value -// var f float32 -// err = codec.NewDecoder(r, handle).Decode(&f) -// -// // Decoding into nil interface -// var v interface{} -// dec := codec.NewDecoder(r, handle) -// err = dec.Decode(&v) -// -// When decoding into a nil interface{}, we will decode into an appropriate value based -// on the contents of the stream: -// - Numbers are decoded as float64, int64 or uint64. -// - Other values are decoded appropriately depending on the type: -// bool, string, []byte, time.Time, etc -// - Extensions are decoded as RawExt (if no ext function registered for the tag) -// Configurations exist on the Handle to override defaults -// (e.g. for MapType, SliceType and how to decode raw bytes). -// -// When decoding into a non-nil interface{} value, the mode of encoding is based on the -// type of the value. When a value is seen: -// - If an extension is registered for it, call that extension function -// - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error -// - Else decode it based on its reflect.Kind -// -// There are some special rules when decoding into containers (slice/array/map/struct). -// Decode will typically use the stream contents to UPDATE the container. -// - A map can be decoded from a stream map, by updating matching keys. -// - A slice can be decoded from a stream array, -// by updating the first n elements, where n is length of the stream. -// - A slice can be decoded from a stream map, by decoding as if -// it contains a sequence of key-value pairs. -// - A struct can be decoded from a stream map, by updating matching fields. -// - A struct can be decoded from a stream array, -// by updating fields as they occur in the struct (by index). -// -// When decoding a stream map or array with length of 0 into a nil map or slice, -// we reset the destination map or slice to a zero-length value. -// -// However, when decoding a stream nil, we reset the destination container -// to its "zero" value (e.g. nil for slice/map, etc). 
-// -func (d *Decoder) Decode(v interface{}) (err error) { - defer panicToErr(&err) - d.decode(v) - return -} - -func (d *Decoder) decode(iv interface{}) { - d.d.initReadNext() - - switch v := iv.(type) { - case nil: - decErr("Cannot decode into nil.") - - case reflect.Value: - d.chkPtrValue(v) - d.decodeValue(v.Elem()) - - case *string: - *v = d.d.decodeString() - case *bool: - *v = d.d.decodeBool() - case *int: - *v = int(d.d.decodeInt(intBitsize)) - case *int8: - *v = int8(d.d.decodeInt(8)) - case *int16: - *v = int16(d.d.decodeInt(16)) - case *int32: - *v = int32(d.d.decodeInt(32)) - case *int64: - *v = d.d.decodeInt(64) - case *uint: - *v = uint(d.d.decodeUint(uintBitsize)) - case *uint8: - *v = uint8(d.d.decodeUint(8)) - case *uint16: - *v = uint16(d.d.decodeUint(16)) - case *uint32: - *v = uint32(d.d.decodeUint(32)) - case *uint64: - *v = d.d.decodeUint(64) - case *float32: - *v = float32(d.d.decodeFloat(true)) - case *float64: - *v = d.d.decodeFloat(false) - case *[]byte: - *v, _ = d.d.decodeBytes(*v) - - case *[]interface{}: - d.decSliceIntf(v, valueTypeInvalid, false) - case *[]uint64: - d.decSliceUint64(v, valueTypeInvalid, false) - case *[]int64: - d.decSliceInt64(v, valueTypeInvalid, false) - case *[]string: - d.decSliceStr(v, valueTypeInvalid, false) - case *map[string]interface{}: - d.decMapStrIntf(v) - case *map[interface{}]interface{}: - d.decMapIntfIntf(v) - case *map[uint64]interface{}: - d.decMapUint64Intf(v) - case *map[int64]interface{}: - d.decMapInt64Intf(v) - - case *interface{}: - d.decodeValue(reflect.ValueOf(iv).Elem()) - - default: - rv := reflect.ValueOf(iv) - d.chkPtrValue(rv) - d.decodeValue(rv.Elem()) - } -} - -func (d *Decoder) decodeValue(rv reflect.Value) { - d.d.initReadNext() - - if d.d.tryDecodeAsNil() { - // If value in stream is nil, set the dereferenced value to its "zero" value (if settable) - if rv.Kind() == reflect.Ptr { - if !rv.IsNil() { - rv.Set(reflect.Zero(rv.Type())) - } - return - } - // for rv.Kind() == reflect.Ptr { - // rv = rv.Elem() - // } - if rv.IsValid() { // rv.CanSet() // always settable, except it's invalid - rv.Set(reflect.Zero(rv.Type())) - } - return - } - - // If the stream does not contain a nil value, then we can deref to the base - // non-pointer value, and decode into that. - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - rv = rv.Elem() - } - - rt := rv.Type() - rtid := reflect.ValueOf(rt).Pointer() - - // retrieve or register a focused function for this type, - // to eliminate the need to do the retrieval multiple times - - // if d.f == nil && d.s == nil { debugf("---->Creating new dec f map for type: %v\n", rt) } - var fn decFn - var ok bool - if useMapForCodecCache { - fn, ok = d.f[rtid] - } else { - for i, v := range d.x { - if v == rtid { - fn, ok = d.s[i], true - break - } - } - } - if !ok { - // debugf("\tCreating new dec fn for type: %v\n", rt) - fi := decFnInfo{ti: getTypeInfo(rtid, rt), d: d, dd: d.d} - fn.i = &fi - // An extension can be registered for any type, regardless of the Kind - // (e.g. type BitSet int64, type MyStruct { / * unexported fields * / }, type X []int, etc.) - // - // We can't check if it's an extension byte here first, because the user may have - // registered a pointer or non-pointer type, meaning we may have to recurse first - // before matching a mapped type, even though the extension byte is already detected. - // - // NOTE: if decoding into a nil interface{}, we return a non-nil - // value even if the container registers a length of 0. 
- if rtid == rawExtTypId { - fn.f = (*decFnInfo).rawExt - } else if d.d.isBuiltinType(rtid) { - fn.f = (*decFnInfo).builtin - } else if xfTag, xfFn := d.h.getDecodeExt(rtid); xfFn != nil { - fi.xfTag, fi.xfFn = xfTag, xfFn - fn.f = (*decFnInfo).ext - } else if supportBinaryMarshal && fi.ti.unm { - fn.f = (*decFnInfo).binaryMarshal - } else { - switch rk := rt.Kind(); rk { - case reflect.String: - fn.f = (*decFnInfo).kString - case reflect.Bool: - fn.f = (*decFnInfo).kBool - case reflect.Int: - fn.f = (*decFnInfo).kInt - case reflect.Int64: - fn.f = (*decFnInfo).kInt64 - case reflect.Int32: - fn.f = (*decFnInfo).kInt32 - case reflect.Int8: - fn.f = (*decFnInfo).kInt8 - case reflect.Int16: - fn.f = (*decFnInfo).kInt16 - case reflect.Float32: - fn.f = (*decFnInfo).kFloat32 - case reflect.Float64: - fn.f = (*decFnInfo).kFloat64 - case reflect.Uint8: - fn.f = (*decFnInfo).kUint8 - case reflect.Uint64: - fn.f = (*decFnInfo).kUint64 - case reflect.Uint: - fn.f = (*decFnInfo).kUint - case reflect.Uint32: - fn.f = (*decFnInfo).kUint32 - case reflect.Uint16: - fn.f = (*decFnInfo).kUint16 - // case reflect.Ptr: - // fn.f = (*decFnInfo).kPtr - case reflect.Interface: - fn.f = (*decFnInfo).kInterface - case reflect.Struct: - fn.f = (*decFnInfo).kStruct - case reflect.Slice: - fn.f = (*decFnInfo).kSlice - case reflect.Array: - fi.array = true - fn.f = (*decFnInfo).kArray - case reflect.Map: - fn.f = (*decFnInfo).kMap - default: - fn.f = (*decFnInfo).kErr - } - } - if useMapForCodecCache { - if d.f == nil { - d.f = make(map[uintptr]decFn, 16) - } - d.f[rtid] = fn - } else { - d.s = append(d.s, fn) - d.x = append(d.x, rtid) - } - } - - fn.f(fn.i, rv) - - return -} - -func (d *Decoder) chkPtrValue(rv reflect.Value) { - // We can only decode into a non-nil pointer - if rv.Kind() == reflect.Ptr && !rv.IsNil() { - return - } - if !rv.IsValid() { - decErr("Cannot decode into a zero (ie invalid) reflect.Value") - } - if !rv.CanInterface() { - decErr("Cannot decode into a value without an interface: %v", rv) - } - rvi := rv.Interface() - decErr("Cannot decode into non-pointer or nil pointer. 
Got: %v, %T, %v", - rv.Kind(), rvi, rvi) -} - -func (d *Decoder) decEmbeddedField(rv reflect.Value, index []int) { - // d.decodeValue(rv.FieldByIndex(index)) - // nil pointers may be here; so reproduce FieldByIndex logic + enhancements - for _, j := range index { - if rv.Kind() == reflect.Ptr { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - // If a pointer, it must be a pointer to struct (based on typeInfo contract) - rv = rv.Elem() - } - rv = rv.Field(j) - } - d.decodeValue(rv) -} - -// -------------------------------------------------- - -// short circuit functions for common maps and slices - -func (d *Decoder) decSliceIntf(v *[]interface{}, currEncodedType valueType, doNotReset bool) { - _, containerLenS := decContLens(d.d, currEncodedType) - s := *v - if s == nil { - s = make([]interface{}, containerLenS, containerLenS) - } else if containerLenS > cap(s) { - if doNotReset { - decErr(msgDecCannotExpandArr, cap(s), containerLenS) - } - s = make([]interface{}, containerLenS, containerLenS) - copy(s, *v) - } else if containerLenS > len(s) { - s = s[:containerLenS] - } - for j := 0; j < containerLenS; j++ { - d.decode(&s[j]) - } - *v = s -} - -func (d *Decoder) decSliceInt64(v *[]int64, currEncodedType valueType, doNotReset bool) { - _, containerLenS := decContLens(d.d, currEncodedType) - s := *v - if s == nil { - s = make([]int64, containerLenS, containerLenS) - } else if containerLenS > cap(s) { - if doNotReset { - decErr(msgDecCannotExpandArr, cap(s), containerLenS) - } - s = make([]int64, containerLenS, containerLenS) - copy(s, *v) - } else if containerLenS > len(s) { - s = s[:containerLenS] - } - for j := 0; j < containerLenS; j++ { - // d.decode(&s[j]) - d.d.initReadNext() - s[j] = d.d.decodeInt(intBitsize) - } - *v = s -} - -func (d *Decoder) decSliceUint64(v *[]uint64, currEncodedType valueType, doNotReset bool) { - _, containerLenS := decContLens(d.d, currEncodedType) - s := *v - if s == nil { - s = make([]uint64, containerLenS, containerLenS) - } else if containerLenS > cap(s) { - if doNotReset { - decErr(msgDecCannotExpandArr, cap(s), containerLenS) - } - s = make([]uint64, containerLenS, containerLenS) - copy(s, *v) - } else if containerLenS > len(s) { - s = s[:containerLenS] - } - for j := 0; j < containerLenS; j++ { - // d.decode(&s[j]) - d.d.initReadNext() - s[j] = d.d.decodeUint(intBitsize) - } - *v = s -} - -func (d *Decoder) decSliceStr(v *[]string, currEncodedType valueType, doNotReset bool) { - _, containerLenS := decContLens(d.d, currEncodedType) - s := *v - if s == nil { - s = make([]string, containerLenS, containerLenS) - } else if containerLenS > cap(s) { - if doNotReset { - decErr(msgDecCannotExpandArr, cap(s), containerLenS) - } - s = make([]string, containerLenS, containerLenS) - copy(s, *v) - } else if containerLenS > len(s) { - s = s[:containerLenS] - } - for j := 0; j < containerLenS; j++ { - // d.decode(&s[j]) - d.d.initReadNext() - s[j] = d.d.decodeString() - } - *v = s -} - -func (d *Decoder) decMapIntfIntf(v *map[interface{}]interface{}) { - containerLen := d.d.readMapLen() - m := *v - if m == nil { - m = make(map[interface{}]interface{}, containerLen) - *v = m - } - for j := 0; j < containerLen; j++ { - var mk interface{} - d.decode(&mk) - // special case if a byte array. 
- if bv, bok := mk.([]byte); bok { - mk = string(bv) - } - mv := m[mk] - d.decode(&mv) - m[mk] = mv - } -} - -func (d *Decoder) decMapInt64Intf(v *map[int64]interface{}) { - containerLen := d.d.readMapLen() - m := *v - if m == nil { - m = make(map[int64]interface{}, containerLen) - *v = m - } - for j := 0; j < containerLen; j++ { - d.d.initReadNext() - mk := d.d.decodeInt(intBitsize) - mv := m[mk] - d.decode(&mv) - m[mk] = mv - } -} - -func (d *Decoder) decMapUint64Intf(v *map[uint64]interface{}) { - containerLen := d.d.readMapLen() - m := *v - if m == nil { - m = make(map[uint64]interface{}, containerLen) - *v = m - } - for j := 0; j < containerLen; j++ { - d.d.initReadNext() - mk := d.d.decodeUint(intBitsize) - mv := m[mk] - d.decode(&mv) - m[mk] = mv - } -} - -func (d *Decoder) decMapStrIntf(v *map[string]interface{}) { - containerLen := d.d.readMapLen() - m := *v - if m == nil { - m = make(map[string]interface{}, containerLen) - *v = m - } - for j := 0; j < containerLen; j++ { - d.d.initReadNext() - mk := d.d.decodeString() - mv := m[mk] - d.decode(&mv) - m[mk] = mv - } -} - -// ---------------------------------------- - -func decContLens(dd decDriver, currEncodedType valueType) (containerLen, containerLenS int) { - if currEncodedType == valueTypeInvalid { - currEncodedType = dd.currentEncodedType() - } - switch currEncodedType { - case valueTypeArray: - containerLen = dd.readArrayLen() - containerLenS = containerLen - case valueTypeMap: - containerLen = dd.readMapLen() - containerLenS = containerLen * 2 - default: - decErr("Only encoded map or array can be decoded into a slice. (valueType: %0x)", - currEncodedType) - } - return -} - -func decErr(format string, params ...interface{}) { - doPanic(msgTagDec, format, params...) -} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/encode.go b/vendor/github.com/hashicorp/go-msgpack/codec/encode.go deleted file mode 100644 index 4914be0c748bf..0000000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/encode.go +++ /dev/null @@ -1,1001 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -import ( - "io" - "reflect" -) - -const ( - // Some tagging information for error messages. - msgTagEnc = "codec.encoder" - defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024 - // maxTimeSecs32 = math.MaxInt32 / 60 / 24 / 366 -) - -// AsSymbolFlag defines what should be encoded as symbols. -type AsSymbolFlag uint8 - -const ( - // AsSymbolDefault is default. - // Currently, this means only encode struct field names as symbols. - // The default is subject to change. - AsSymbolDefault AsSymbolFlag = iota - - // AsSymbolAll means encode anything which could be a symbol as a symbol. - AsSymbolAll = 0xfe - - // AsSymbolNone means do not encode anything as a symbol. - AsSymbolNone = 1 << iota - - // AsSymbolMapStringKeys means encode keys in map[string]XXX as symbols. - AsSymbolMapStringKeysFlag - - // AsSymbolStructFieldName means encode struct field names as symbols. - AsSymbolStructFieldNameFlag -) - -// encWriter abstracting writing to a byte array or to an io.Writer. 
-type encWriter interface { - writeUint16(uint16) - writeUint32(uint32) - writeUint64(uint64) - writeb([]byte) - writestr(string) - writen1(byte) - writen2(byte, byte) - atEndOfEncode() -} - -// encDriver abstracts the actual codec (binc vs msgpack, etc) -type encDriver interface { - isBuiltinType(rt uintptr) bool - encodeBuiltin(rt uintptr, v interface{}) - encodeNil() - encodeInt(i int64) - encodeUint(i uint64) - encodeBool(b bool) - encodeFloat32(f float32) - encodeFloat64(f float64) - encodeExtPreamble(xtag byte, length int) - encodeArrayPreamble(length int) - encodeMapPreamble(length int) - encodeString(c charEncoding, v string) - encodeSymbol(v string) - encodeStringBytes(c charEncoding, v []byte) - //TODO - //encBignum(f *big.Int) - //encStringRunes(c charEncoding, v []rune) -} - -type ioEncWriterWriter interface { - WriteByte(c byte) error - WriteString(s string) (n int, err error) - Write(p []byte) (n int, err error) -} - -type ioEncStringWriter interface { - WriteString(s string) (n int, err error) -} - -type EncodeOptions struct { - // Encode a struct as an array, and not as a map. - StructToArray bool - - // AsSymbols defines what should be encoded as symbols. - // - // Encoding as symbols can reduce the encoded size significantly. - // - // However, during encoding, each string to be encoded as a symbol must - // be checked to see if it has been seen before. Consequently, encoding time - // will increase when using symbols, because string comparisons have a clear cost. - // - // Sample values: - // AsSymbolNone - // AsSymbolAll - // AsSymbolMapStringKeysFlag - // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag - AsSymbols AsSymbolFlag -}
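// As an illustration of the AsSymbols option just described (a sketch; it
// assumes AsSymbols is reachable through the handle via BasicHandle embedding,
// as MapType is in the package docs, and uses binc, which supports symbols
// natively):
//
//	// encodeWithSymbols writes struct field names and map[string]... keys
//	// as symbols, so repeated strings are emitted once and then referenced.
//	func encodeWithSymbols(v interface{}) ([]byte, error) {
//		var bh codec.BincHandle
//		bh.AsSymbols = codec.AsSymbolMapStringKeysFlag | codec.AsSymbolStructFieldNameFlag
//		var b []byte
//		err := codec.NewEncoderBytes(&b, &bh).Encode(v)
//		return b, err
//	}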
- -// --------------------------------------------- - -type simpleIoEncWriterWriter struct { - w io.Writer - bw io.ByteWriter - sw ioEncStringWriter -} - -func (o *simpleIoEncWriterWriter) WriteByte(c byte) (err error) { - if o.bw != nil { - return o.bw.WriteByte(c) - } - _, err = o.w.Write([]byte{c}) - return -} - -func (o *simpleIoEncWriterWriter) WriteString(s string) (n int, err error) { - if o.sw != nil { - return o.sw.WriteString(s) - } - return o.w.Write([]byte(s)) -} - -func (o *simpleIoEncWriterWriter) Write(p []byte) (n int, err error) { - return o.w.Write(p) -} - -// ---------------------------------------- - -// ioEncWriter implements encWriter and can write to an io.Writer implementation -type ioEncWriter struct { - w ioEncWriterWriter - x [8]byte // temp byte array re-used internally for efficiency -} - -func (z *ioEncWriter) writeUint16(v uint16) { - bigen.PutUint16(z.x[:2], v) - z.writeb(z.x[:2]) -} - -func (z *ioEncWriter) writeUint32(v uint32) { - bigen.PutUint32(z.x[:4], v) - z.writeb(z.x[:4]) -} - -func (z *ioEncWriter) writeUint64(v uint64) { - bigen.PutUint64(z.x[:8], v) - z.writeb(z.x[:8]) -} - -func (z *ioEncWriter) writeb(bs []byte) { - if len(bs) == 0 { - return - } - n, err := z.w.Write(bs) - if err != nil { - panic(err) - } - if n != len(bs) { - encErr("write: Incorrect num bytes written. Expecting: %v, Wrote: %v", len(bs), n) - } -} - -func (z *ioEncWriter) writestr(s string) { - n, err := z.w.WriteString(s) - if err != nil { - panic(err) - } - if n != len(s) { - encErr("write: Incorrect num bytes written. Expecting: %v, Wrote: %v", len(s), n) - } -} - -func (z *ioEncWriter) writen1(b byte) { - if err := z.w.WriteByte(b); err != nil { - panic(err) - } -} - -func (z *ioEncWriter) writen2(b1 byte, b2 byte) { - z.writen1(b1) - z.writen1(b2) -} - -func (z *ioEncWriter) atEndOfEncode() {} - -// ---------------------------------------- - -// bytesEncWriter implements encWriter and can write to a byte slice. -// It is used by the Marshal function. -type bytesEncWriter struct { - b []byte - c int // cursor - out *[]byte // write out on atEndOfEncode -} - -func (z *bytesEncWriter) writeUint16(v uint16) { - c := z.grow(2) - z.b[c] = byte(v >> 8) - z.b[c+1] = byte(v) -} - -func (z *bytesEncWriter) writeUint32(v uint32) { - c := z.grow(4) - z.b[c] = byte(v >> 24) - z.b[c+1] = byte(v >> 16) - z.b[c+2] = byte(v >> 8) - z.b[c+3] = byte(v) -} - -func (z *bytesEncWriter) writeUint64(v uint64) { - c := z.grow(8) - z.b[c] = byte(v >> 56) - z.b[c+1] = byte(v >> 48) - z.b[c+2] = byte(v >> 40) - z.b[c+3] = byte(v >> 32) - z.b[c+4] = byte(v >> 24) - z.b[c+5] = byte(v >> 16) - z.b[c+6] = byte(v >> 8) - z.b[c+7] = byte(v) -} - -func (z *bytesEncWriter) writeb(s []byte) { - if len(s) == 0 { - return - } - c := z.grow(len(s)) - copy(z.b[c:], s) -} - -func (z *bytesEncWriter) writestr(s string) { - c := z.grow(len(s)) - copy(z.b[c:], s) -} - -func (z *bytesEncWriter) writen1(b1 byte) { - c := z.grow(1) - z.b[c] = b1 -} - -func (z *bytesEncWriter) writen2(b1 byte, b2 byte) { - c := z.grow(2) - z.b[c] = b1 - z.b[c+1] = b2 -} - -func (z *bytesEncWriter) atEndOfEncode() { - *(z.out) = z.b[:z.c] -} - -func (z *bytesEncWriter) grow(n int) (oldcursor int) { - oldcursor = z.c - z.c = oldcursor + n - if z.c > cap(z.b) { - // Tried using appendslice logic: (if cap < 1024, *2, else *1.25). - // However, it was too expensive, causing too many iterations of copy. 
- // Using bytes.Buffer model was much better (2*cap + n) - bs := make([]byte, 2*cap(z.b)+n) - copy(bs, z.b[:oldcursor]) - z.b = bs - } else if z.c > len(z.b) { - z.b = z.b[:cap(z.b)] - } - return -} - -// --------------------------------------------- - -type encFnInfo struct { - ti *typeInfo - e *Encoder - ee encDriver - xfFn func(reflect.Value) ([]byte, error) - xfTag byte -} - -func (f *encFnInfo) builtin(rv reflect.Value) { - f.ee.encodeBuiltin(f.ti.rtid, rv.Interface()) -} - -func (f *encFnInfo) rawExt(rv reflect.Value) { - f.e.encRawExt(rv.Interface().(RawExt)) -} - -func (f *encFnInfo) ext(rv reflect.Value) { - bs, fnerr := f.xfFn(rv) - if fnerr != nil { - panic(fnerr) - } - if bs == nil { - f.ee.encodeNil() - return - } - if f.e.hh.writeExt() { - f.ee.encodeExtPreamble(f.xfTag, len(bs)) - f.e.w.writeb(bs) - } else { - f.ee.encodeStringBytes(c_RAW, bs) - } - -} - -func (f *encFnInfo) binaryMarshal(rv reflect.Value) { - var bm binaryMarshaler - if f.ti.mIndir == 0 { - bm = rv.Interface().(binaryMarshaler) - } else if f.ti.mIndir == -1 { - bm = rv.Addr().Interface().(binaryMarshaler) - } else { - for j, k := int8(0), f.ti.mIndir; j < k; j++ { - if rv.IsNil() { - f.ee.encodeNil() - return - } - rv = rv.Elem() - } - bm = rv.Interface().(binaryMarshaler) - } - // debugf(">>>> binaryMarshaler: %T", rv.Interface()) - bs, fnerr := bm.MarshalBinary() - if fnerr != nil { - panic(fnerr) - } - if bs == nil { - f.ee.encodeNil() - } else { - f.ee.encodeStringBytes(c_RAW, bs) - } -} - -func (f *encFnInfo) kBool(rv reflect.Value) { - f.ee.encodeBool(rv.Bool()) -} - -func (f *encFnInfo) kString(rv reflect.Value) { - f.ee.encodeString(c_UTF8, rv.String()) -} - -func (f *encFnInfo) kFloat64(rv reflect.Value) { - f.ee.encodeFloat64(rv.Float()) -} - -func (f *encFnInfo) kFloat32(rv reflect.Value) { - f.ee.encodeFloat32(float32(rv.Float())) -} - -func (f *encFnInfo) kInt(rv reflect.Value) { - f.ee.encodeInt(rv.Int()) -} - -func (f *encFnInfo) kUint(rv reflect.Value) { - f.ee.encodeUint(rv.Uint()) -} - -func (f *encFnInfo) kInvalid(rv reflect.Value) { - f.ee.encodeNil() -} - -func (f *encFnInfo) kErr(rv reflect.Value) { - encErr("Unsupported kind: %s, for: %#v", rv.Kind(), rv) -} - -func (f *encFnInfo) kSlice(rv reflect.Value) { - if rv.IsNil() { - f.ee.encodeNil() - return - } - - if shortCircuitReflectToFastPath { - switch f.ti.rtid { - case intfSliceTypId: - f.e.encSliceIntf(rv.Interface().([]interface{})) - return - case strSliceTypId: - f.e.encSliceStr(rv.Interface().([]string)) - return - case uint64SliceTypId: - f.e.encSliceUint64(rv.Interface().([]uint64)) - return - case int64SliceTypId: - f.e.encSliceInt64(rv.Interface().([]int64)) - return - } - } - - // If in this method, then there was no extension function defined. - // So it's okay to treat as []byte. - if f.ti.rtid == uint8SliceTypId || f.ti.rt.Elem().Kind() == reflect.Uint8 { - f.ee.encodeStringBytes(c_RAW, rv.Bytes()) - return - } - - l := rv.Len() - if f.ti.mbs { - if l%2 == 1 { - encErr("mapBySlice: invalid length (must be divisible by 2): %v", l) - } - f.ee.encodeMapPreamble(l / 2) - } else { - f.ee.encodeArrayPreamble(l) - } - if l == 0 { - return - } - for j := 0; j < l; j++ { - // TODO: Consider perf implication of encoding odd index values as symbols if type is string - f.e.encodeValue(rv.Index(j)) - } -} - -func (f *encFnInfo) kArray(rv reflect.Value) { - // We cannot share kSlice method, because the array may be non-addressable. - // E.g. type struct S{B [2]byte}; Encode(S{}) will bomb on "panic: slice of unaddressable array". 
- // So we have to duplicate the functionality here. - // f.e.encodeValue(rv.Slice(0, rv.Len())) - // f.kSlice(rv.Slice(0, rv.Len())) - - l := rv.Len() - // Handle an array of bytes specially (in line with what is done for slices) - if f.ti.rt.Elem().Kind() == reflect.Uint8 { - if l == 0 { - f.ee.encodeStringBytes(c_RAW, nil) - return - } - var bs []byte - if rv.CanAddr() { - bs = rv.Slice(0, l).Bytes() - } else { - bs = make([]byte, l) - for i := 0; i < l; i++ { - bs[i] = byte(rv.Index(i).Uint()) - } - } - f.ee.encodeStringBytes(c_RAW, bs) - return - } - - if f.ti.mbs { - if l%2 == 1 { - encErr("mapBySlice: invalid length (must be divisible by 2): %v", l) - } - f.ee.encodeMapPreamble(l / 2) - } else { - f.ee.encodeArrayPreamble(l) - } - if l == 0 { - return - } - for j := 0; j < l; j++ { - // TODO: Consider perf implication of encoding odd index values as symbols if type is string - f.e.encodeValue(rv.Index(j)) - } -} - -func (f *encFnInfo) kStruct(rv reflect.Value) { - fti := f.ti - newlen := len(fti.sfi) - rvals := make([]reflect.Value, newlen) - var encnames []string - e := f.e - tisfi := fti.sfip - toMap := !(fti.toArray || e.h.StructToArray) - // if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct) - if toMap { - tisfi = fti.sfi - encnames = make([]string, newlen) - } - newlen = 0 - for _, si := range tisfi { - if si.i != -1 { - rvals[newlen] = rv.Field(int(si.i)) - } else { - rvals[newlen] = rv.FieldByIndex(si.is) - } - if toMap { - if si.omitEmpty && isEmptyValue(rvals[newlen]) { - continue - } - encnames[newlen] = si.encName - } else { - if si.omitEmpty && isEmptyValue(rvals[newlen]) { - rvals[newlen] = reflect.Value{} //encode as nil - } - } - newlen++ - } - - // debugf(">>>> kStruct: newlen: %v", newlen) - if toMap { - ee := f.ee //don't dereference everytime - ee.encodeMapPreamble(newlen) - // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 - asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 - for j := 0; j < newlen; j++ { - if asSymbols { - ee.encodeSymbol(encnames[j]) - } else { - ee.encodeString(c_UTF8, encnames[j]) - } - e.encodeValue(rvals[j]) - } - } else { - f.ee.encodeArrayPreamble(newlen) - for j := 0; j < newlen; j++ { - e.encodeValue(rvals[j]) - } - } -} - -// func (f *encFnInfo) kPtr(rv reflect.Value) { -// debugf(">>>>>>> ??? 
encode kPtr called - shouldn't get called") -// if rv.IsNil() { -// f.ee.encodeNil() -// return -// } -// f.e.encodeValue(rv.Elem()) -// } - -func (f *encFnInfo) kInterface(rv reflect.Value) { - if rv.IsNil() { - f.ee.encodeNil() - return - } - f.e.encodeValue(rv.Elem()) -} - -func (f *encFnInfo) kMap(rv reflect.Value) { - if rv.IsNil() { - f.ee.encodeNil() - return - } - - if shortCircuitReflectToFastPath { - switch f.ti.rtid { - case mapIntfIntfTypId: - f.e.encMapIntfIntf(rv.Interface().(map[interface{}]interface{})) - return - case mapStrIntfTypId: - f.e.encMapStrIntf(rv.Interface().(map[string]interface{})) - return - case mapStrStrTypId: - f.e.encMapStrStr(rv.Interface().(map[string]string)) - return - case mapInt64IntfTypId: - f.e.encMapInt64Intf(rv.Interface().(map[int64]interface{})) - return - case mapUint64IntfTypId: - f.e.encMapUint64Intf(rv.Interface().(map[uint64]interface{})) - return - } - } - - l := rv.Len() - f.ee.encodeMapPreamble(l) - if l == 0 { - return - } - // keyTypeIsString := f.ti.rt.Key().Kind() == reflect.String - keyTypeIsString := f.ti.rt.Key() == stringTyp - var asSymbols bool - if keyTypeIsString { - asSymbols = f.e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - } - mks := rv.MapKeys() - // for j, lmks := 0, len(mks); j < lmks; j++ { - for j := range mks { - if keyTypeIsString { - if asSymbols { - f.ee.encodeSymbol(mks[j].String()) - } else { - f.ee.encodeString(c_UTF8, mks[j].String()) - } - } else { - f.e.encodeValue(mks[j]) - } - f.e.encodeValue(rv.MapIndex(mks[j])) - } - -} - -// -------------------------------------------------- - -// encFn encapsulates the captured variables and the encode function. -// This way, we only do some calculations one times, and pass to the -// code block that should be called (encapsulated in a function) -// instead of executing the checks every time. -type encFn struct { - i *encFnInfo - f func(*encFnInfo, reflect.Value) -} - -// -------------------------------------------------- - -// An Encoder writes an object to an output stream in the codec format. -type Encoder struct { - w encWriter - e encDriver - h *BasicHandle - hh Handle - f map[uintptr]encFn - x []uintptr - s []encFn -} - -// NewEncoder returns an Encoder for encoding into an io.Writer. -// -// For efficiency, Users are encouraged to pass in a memory buffered writer -// (eg bufio.Writer, bytes.Buffer). -func NewEncoder(w io.Writer, h Handle) *Encoder { - ww, ok := w.(ioEncWriterWriter) - if !ok { - sww := simpleIoEncWriterWriter{w: w} - sww.bw, _ = w.(io.ByteWriter) - sww.sw, _ = w.(ioEncStringWriter) - ww = &sww - //ww = bufio.NewWriterSize(w, defEncByteBufSize) - } - z := ioEncWriter{ - w: ww, - } - return &Encoder{w: &z, hh: h, h: h.getBasicHandle(), e: h.newEncDriver(&z)} -} - -// NewEncoderBytes returns an encoder for encoding directly and efficiently -// into a byte slice, using zero-copying to temporary slices. -// -// It will potentially replace the output byte slice pointed to. -// After encoding, the out parameter contains the encoded contents. -func NewEncoderBytes(out *[]byte, h Handle) *Encoder { - in := *out - if in == nil { - in = make([]byte, defEncByteBufSize) - } - z := bytesEncWriter{ - b: in, - out: out, - } - return &Encoder{w: &z, hh: h, h: h.getBasicHandle(), e: h.newEncDriver(&z)} -} - -// Encode writes an object into a stream in the codec format. -// -// Encoding can be configured via the "codec" struct tag for the fields. -// -// The "codec" key in struct field's tag value is the key name, -// followed by an optional comma and options. 
-// -// To set an option on all fields (e.g. omitempty on all fields), you -// can create a field called _struct, and set flags on it. -// -// Struct values "usually" encode as maps. Each exported struct field is encoded unless: -// - the field's codec tag is "-", OR -// - the field is empty and its codec tag specifies the "omitempty" option. -// -// When encoding as a map, the first string in the tag (before the comma) -// is the map key string to use when encoding. -// -// However, struct values may encode as arrays. This happens when: -// - StructToArray Encode option is set, OR -// - the codec tag on the _struct field sets the "toarray" option -// -// Values with types that implement MapBySlice are encoded as stream maps. -// -// The empty values (for omitempty option) are false, 0, any nil pointer -// or interface value, and any array, slice, map, or string of length zero. -// -// Anonymous fields are encoded inline if no struct tag is present. -// Else they are encoded as regular fields. -// -// Examples: -// -// type MyStruct struct { -// _struct bool `codec:",omitempty"` //set omitempty for every field -// Field1 string `codec:"-"` //skip this field -// Field2 int `codec:"myName"` //Use key "myName" in encode stream -// Field3 int32 `codec:",omitempty"` //use key "Field3". Omit if empty. -// Field4 bool `codec:"f4,omitempty"` //use key "f4". Omit if empty. -// ... -// } -// -// type MyStruct struct { -// _struct bool `codec:",omitempty,toarray"` //set omitempty for every field -// //and encode struct as an array -// } -// -// The mode of encoding is based on the type of the value. When a value is seen: -// - If an extension is registered for it, call that extension function -// - If it implements BinaryMarshaler, call its MarshalBinary() (data []byte, err error) -// - Else encode it based on its reflect.Kind -// -// Note that struct field names and keys in map[string]XXX will be treated as symbols. -// Some formats support symbols (e.g. binc) and will properly encode the string -// only once in the stream, and use a tag to refer to it thereafter. 
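The tag rules documented above are easiest to see end to end in a short sketch. Everything here is illustrative: the `Record` type and its fields are hypothetical, and the import assumes this (now-removed) package is still reachable at its pre-removal path.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-msgpack/codec"
)

// Record demonstrates the documented tag forms: a _struct field applying
// omitempty to every field, a skipped field, a renamed key, and a
// per-field omitempty.
type Record struct {
	_struct bool   `codec:",omitempty"` // omitempty for every field
	Secret  string `codec:"-"`          // never encoded
	ID      int    `codec:"id"`         // encoded under key "id"
	Note    string `codec:",omitempty"` // key "Note", dropped when empty
}

func main() {
	var out []byte
	enc := codec.NewEncoderBytes(&out, &codec.MsgpackHandle{})
	if err := enc.Encode(Record{ID: 7}); err != nil {
		panic(err)
	}
	// With Note empty and Secret skipped, the stream is a one-entry map:
	// {"id": 7}.
	fmt.Printf("% x\n", out)
}
```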
-func (e *Encoder) Encode(v interface{}) (err error) { - defer panicToErr(&err) - e.encode(v) - e.w.atEndOfEncode() - return -} - -func (e *Encoder) encode(iv interface{}) { - switch v := iv.(type) { - case nil: - e.e.encodeNil() - - case reflect.Value: - e.encodeValue(v) - - case string: - e.e.encodeString(c_UTF8, v) - case bool: - e.e.encodeBool(v) - case int: - e.e.encodeInt(int64(v)) - case int8: - e.e.encodeInt(int64(v)) - case int16: - e.e.encodeInt(int64(v)) - case int32: - e.e.encodeInt(int64(v)) - case int64: - e.e.encodeInt(v) - case uint: - e.e.encodeUint(uint64(v)) - case uint8: - e.e.encodeUint(uint64(v)) - case uint16: - e.e.encodeUint(uint64(v)) - case uint32: - e.e.encodeUint(uint64(v)) - case uint64: - e.e.encodeUint(v) - case float32: - e.e.encodeFloat32(v) - case float64: - e.e.encodeFloat64(v) - - case []interface{}: - e.encSliceIntf(v) - case []string: - e.encSliceStr(v) - case []int64: - e.encSliceInt64(v) - case []uint64: - e.encSliceUint64(v) - case []uint8: - e.e.encodeStringBytes(c_RAW, v) - - case map[interface{}]interface{}: - e.encMapIntfIntf(v) - case map[string]interface{}: - e.encMapStrIntf(v) - case map[string]string: - e.encMapStrStr(v) - case map[int64]interface{}: - e.encMapInt64Intf(v) - case map[uint64]interface{}: - e.encMapUint64Intf(v) - - case *string: - e.e.encodeString(c_UTF8, *v) - case *bool: - e.e.encodeBool(*v) - case *int: - e.e.encodeInt(int64(*v)) - case *int8: - e.e.encodeInt(int64(*v)) - case *int16: - e.e.encodeInt(int64(*v)) - case *int32: - e.e.encodeInt(int64(*v)) - case *int64: - e.e.encodeInt(*v) - case *uint: - e.e.encodeUint(uint64(*v)) - case *uint8: - e.e.encodeUint(uint64(*v)) - case *uint16: - e.e.encodeUint(uint64(*v)) - case *uint32: - e.e.encodeUint(uint64(*v)) - case *uint64: - e.e.encodeUint(*v) - case *float32: - e.e.encodeFloat32(*v) - case *float64: - e.e.encodeFloat64(*v) - - case *[]interface{}: - e.encSliceIntf(*v) - case *[]string: - e.encSliceStr(*v) - case *[]int64: - e.encSliceInt64(*v) - case *[]uint64: - e.encSliceUint64(*v) - case *[]uint8: - e.e.encodeStringBytes(c_RAW, *v) - - case *map[interface{}]interface{}: - e.encMapIntfIntf(*v) - case *map[string]interface{}: - e.encMapStrIntf(*v) - case *map[string]string: - e.encMapStrStr(*v) - case *map[int64]interface{}: - e.encMapInt64Intf(*v) - case *map[uint64]interface{}: - e.encMapUint64Intf(*v) - - default: - e.encodeValue(reflect.ValueOf(iv)) - } -} - -func (e *Encoder) encodeValue(rv reflect.Value) { - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - e.e.encodeNil() - return - } - rv = rv.Elem() - } - - rt := rv.Type() - rtid := reflect.ValueOf(rt).Pointer() - - // if e.f == nil && e.s == nil { debugf("---->Creating new enc f map for type: %v\n", rt) } - var fn encFn - var ok bool - if useMapForCodecCache { - fn, ok = e.f[rtid] - } else { - for i, v := range e.x { - if v == rtid { - fn, ok = e.s[i], true - break - } - } - } - if !ok { - // debugf("\tCreating new enc fn for type: %v\n", rt) - fi := encFnInfo{ti: getTypeInfo(rtid, rt), e: e, ee: e.e} - fn.i = &fi - if rtid == rawExtTypId { - fn.f = (*encFnInfo).rawExt - } else if e.e.isBuiltinType(rtid) { - fn.f = (*encFnInfo).builtin - } else if xfTag, xfFn := e.h.getEncodeExt(rtid); xfFn != nil { - fi.xfTag, fi.xfFn = xfTag, xfFn - fn.f = (*encFnInfo).ext - } else if supportBinaryMarshal && fi.ti.m { - fn.f = (*encFnInfo).binaryMarshal - } else { - switch rk := rt.Kind(); rk { - case reflect.Bool: - fn.f = (*encFnInfo).kBool - case reflect.String: - fn.f = (*encFnInfo).kString - case reflect.Float64: - 
fn.f = (*encFnInfo).kFloat64 - case reflect.Float32: - fn.f = (*encFnInfo).kFloat32 - case reflect.Int, reflect.Int8, reflect.Int64, reflect.Int32, reflect.Int16: - fn.f = (*encFnInfo).kInt - case reflect.Uint8, reflect.Uint64, reflect.Uint, reflect.Uint32, reflect.Uint16: - fn.f = (*encFnInfo).kUint - case reflect.Invalid: - fn.f = (*encFnInfo).kInvalid - case reflect.Slice: - fn.f = (*encFnInfo).kSlice - case reflect.Array: - fn.f = (*encFnInfo).kArray - case reflect.Struct: - fn.f = (*encFnInfo).kStruct - // case reflect.Ptr: - // fn.f = (*encFnInfo).kPtr - case reflect.Interface: - fn.f = (*encFnInfo).kInterface - case reflect.Map: - fn.f = (*encFnInfo).kMap - default: - fn.f = (*encFnInfo).kErr - } - } - if useMapForCodecCache { - if e.f == nil { - e.f = make(map[uintptr]encFn, 16) - } - e.f[rtid] = fn - } else { - e.s = append(e.s, fn) - e.x = append(e.x, rtid) - } - } - - fn.f(fn.i, rv) - -} - -func (e *Encoder) encRawExt(re RawExt) { - if re.Data == nil { - e.e.encodeNil() - return - } - if e.hh.writeExt() { - e.e.encodeExtPreamble(re.Tag, len(re.Data)) - e.w.writeb(re.Data) - } else { - e.e.encodeStringBytes(c_RAW, re.Data) - } -} - -// --------------------------------------------- -// short circuit functions for common maps and slices - -func (e *Encoder) encSliceIntf(v []interface{}) { - e.e.encodeArrayPreamble(len(v)) - for _, v2 := range v { - e.encode(v2) - } -} - -func (e *Encoder) encSliceStr(v []string) { - e.e.encodeArrayPreamble(len(v)) - for _, v2 := range v { - e.e.encodeString(c_UTF8, v2) - } -} - -func (e *Encoder) encSliceInt64(v []int64) { - e.e.encodeArrayPreamble(len(v)) - for _, v2 := range v { - e.e.encodeInt(v2) - } -} - -func (e *Encoder) encSliceUint64(v []uint64) { - e.e.encodeArrayPreamble(len(v)) - for _, v2 := range v { - e.e.encodeUint(v2) - } -} - -func (e *Encoder) encMapStrStr(v map[string]string) { - e.e.encodeMapPreamble(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - for k2, v2 := range v { - if asSymbols { - e.e.encodeSymbol(k2) - } else { - e.e.encodeString(c_UTF8, k2) - } - e.e.encodeString(c_UTF8, v2) - } -} - -func (e *Encoder) encMapStrIntf(v map[string]interface{}) { - e.e.encodeMapPreamble(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - for k2, v2 := range v { - if asSymbols { - e.e.encodeSymbol(k2) - } else { - e.e.encodeString(c_UTF8, k2) - } - e.encode(v2) - } -} - -func (e *Encoder) encMapInt64Intf(v map[int64]interface{}) { - e.e.encodeMapPreamble(len(v)) - for k2, v2 := range v { - e.e.encodeInt(k2) - e.encode(v2) - } -} - -func (e *Encoder) encMapUint64Intf(v map[uint64]interface{}) { - e.e.encodeMapPreamble(len(v)) - for k2, v2 := range v { - e.e.encodeUint(uint64(k2)) - e.encode(v2) - } -} - -func (e *Encoder) encMapIntfIntf(v map[interface{}]interface{}) { - e.e.encodeMapPreamble(len(v)) - for k2, v2 := range v { - e.encode(k2) - e.encode(v2) - } -} - -// ---------------------------------------- - -func encErr(format string, params ...interface{}) { - doPanic(msgTagEnc, format, params...) -} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/helper.go b/vendor/github.com/hashicorp/go-msgpack/codec/helper.go deleted file mode 100644 index 7da3955edc99f..0000000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/helper.go +++ /dev/null @@ -1,596 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -// Contains code shared by both encode and decode. 
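For orientation, the heart of the deleted encoder is the per-type dispatch cache in `encodeValue` above: the kind switch runs once per `reflect.Type`, and the chosen function is then found again by the type's pointer identity. A compressed, standalone sketch of that pattern follows; `fnCache`, `lookup`, and `encFunc` are illustrative names, not the package's API.

```go
package main

import (
	"fmt"
	"reflect"
)

type encFunc func(reflect.Value) string

// fnCache memoizes one encode function per concrete type.
type fnCache struct {
	m map[uintptr]encFunc
}

func (c *fnCache) lookup(rv reflect.Value) encFunc {
	rt := rv.Type()
	// Same type-identity trick as the codec package: the pointer behind
	// a reflect.Type is stable and unique per type.
	rtid := reflect.ValueOf(rt).Pointer()
	if fn, ok := c.m[rtid]; ok {
		return fn
	}
	var fn encFunc
	switch rt.Kind() { // chosen once per type, not once per value
	case reflect.String:
		fn = func(v reflect.Value) string { return "str:" + v.String() }
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		fn = func(v reflect.Value) string { return fmt.Sprintf("int:%d", v.Int()) }
	default:
		fn = func(v reflect.Value) string { return "kind:" + v.Kind().String() }
	}
	c.m[rtid] = fn
	return fn
}

func main() {
	c := &fnCache{m: make(map[uintptr]encFunc)}
	for _, v := range []interface{}{"a", "b", 42} {
		rv := reflect.ValueOf(v)
		fmt.Println(c.lookup(rv)(rv)) // the second string hits the cache
	}
}
```

The `useMapForCodecCache` flag defined just below chooses between this map and a linear slice scan, trading per-lookup cost against allocation.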
- -import ( - "encoding/binary" - "fmt" - "math" - "reflect" - "sort" - "strings" - "sync" - "time" - "unicode" - "unicode/utf8" -) - -const ( - structTagName = "codec" - - // Support - // encoding.BinaryMarshaler: MarshalBinary() (data []byte, err error) - // encoding.BinaryUnmarshaler: UnmarshalBinary(data []byte) error - // This constant flag will enable or disable it. - supportBinaryMarshal = true - - // Each Encoder or Decoder uses a cache of functions based on conditionals, - // so that the conditionals are not run every time. - // - // Either a map or a slice is used to keep track of the functions. - // The map is more natural, but has a higher cost than a slice/array. - // This flag (useMapForCodecCache) controls which is used. - useMapForCodecCache = false - - // For some common container types, we can short-circuit an elaborate - // reflection dance and call encode/decode directly. - // The currently supported types are: - // - slices of strings, or id's (int64,uint64) or interfaces. - // - maps of str->str, str->intf, id(int64,uint64)->intf, intf->intf - shortCircuitReflectToFastPath = true - - // for debugging, set this to false, to catch panic traces. - // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic. - recoverPanicToErr = true - - // if checkStructForEmptyValue, check structs fields to see if an empty value. - // This could be an expensive call, so possibly disable it. - checkStructForEmptyValue = false - - // if derefForIsEmptyValue, deref pointers and interfaces when checking isEmptyValue - derefForIsEmptyValue = false -) - -type charEncoding uint8 - -const ( - c_RAW charEncoding = iota - c_UTF8 - c_UTF16LE - c_UTF16BE - c_UTF32LE - c_UTF32BE -) - -// valueType is the stream type -type valueType uint8 - -const ( - valueTypeUnset valueType = iota - valueTypeNil - valueTypeInt - valueTypeUint - valueTypeFloat - valueTypeBool - valueTypeString - valueTypeSymbol - valueTypeBytes - valueTypeMap - valueTypeArray - valueTypeTimestamp - valueTypeExt - - valueTypeInvalid = 0xff -) - -var ( - bigen = binary.BigEndian - structInfoFieldName = "_struct" - - cachedTypeInfo = make(map[uintptr]*typeInfo, 4) - cachedTypeInfoMutex sync.RWMutex - - intfSliceTyp = reflect.TypeOf([]interface{}(nil)) - intfTyp = intfSliceTyp.Elem() - - strSliceTyp = reflect.TypeOf([]string(nil)) - boolSliceTyp = reflect.TypeOf([]bool(nil)) - uintSliceTyp = reflect.TypeOf([]uint(nil)) - uint8SliceTyp = reflect.TypeOf([]uint8(nil)) - uint16SliceTyp = reflect.TypeOf([]uint16(nil)) - uint32SliceTyp = reflect.TypeOf([]uint32(nil)) - uint64SliceTyp = reflect.TypeOf([]uint64(nil)) - intSliceTyp = reflect.TypeOf([]int(nil)) - int8SliceTyp = reflect.TypeOf([]int8(nil)) - int16SliceTyp = reflect.TypeOf([]int16(nil)) - int32SliceTyp = reflect.TypeOf([]int32(nil)) - int64SliceTyp = reflect.TypeOf([]int64(nil)) - float32SliceTyp = reflect.TypeOf([]float32(nil)) - float64SliceTyp = reflect.TypeOf([]float64(nil)) - - mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) - mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) - mapStrStrTyp = reflect.TypeOf(map[string]string(nil)) - - mapIntIntfTyp = reflect.TypeOf(map[int]interface{}(nil)) - mapInt64IntfTyp = reflect.TypeOf(map[int64]interface{}(nil)) - mapUintIntfTyp = reflect.TypeOf(map[uint]interface{}(nil)) - mapUint64IntfTyp = reflect.TypeOf(map[uint64]interface{}(nil)) - - stringTyp = reflect.TypeOf("") - timeTyp = reflect.TypeOf(time.Time{}) - rawExtTyp = reflect.TypeOf(RawExt{}) - - mapBySliceTyp = 
reflect.TypeOf((*MapBySlice)(nil)).Elem() - binaryMarshalerTyp = reflect.TypeOf((*binaryMarshaler)(nil)).Elem() - binaryUnmarshalerTyp = reflect.TypeOf((*binaryUnmarshaler)(nil)).Elem() - - rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer() - intfTypId = reflect.ValueOf(intfTyp).Pointer() - timeTypId = reflect.ValueOf(timeTyp).Pointer() - - intfSliceTypId = reflect.ValueOf(intfSliceTyp).Pointer() - strSliceTypId = reflect.ValueOf(strSliceTyp).Pointer() - - boolSliceTypId = reflect.ValueOf(boolSliceTyp).Pointer() - uintSliceTypId = reflect.ValueOf(uintSliceTyp).Pointer() - uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer() - uint16SliceTypId = reflect.ValueOf(uint16SliceTyp).Pointer() - uint32SliceTypId = reflect.ValueOf(uint32SliceTyp).Pointer() - uint64SliceTypId = reflect.ValueOf(uint64SliceTyp).Pointer() - intSliceTypId = reflect.ValueOf(intSliceTyp).Pointer() - int8SliceTypId = reflect.ValueOf(int8SliceTyp).Pointer() - int16SliceTypId = reflect.ValueOf(int16SliceTyp).Pointer() - int32SliceTypId = reflect.ValueOf(int32SliceTyp).Pointer() - int64SliceTypId = reflect.ValueOf(int64SliceTyp).Pointer() - float32SliceTypId = reflect.ValueOf(float32SliceTyp).Pointer() - float64SliceTypId = reflect.ValueOf(float64SliceTyp).Pointer() - - mapStrStrTypId = reflect.ValueOf(mapStrStrTyp).Pointer() - mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer() - mapStrIntfTypId = reflect.ValueOf(mapStrIntfTyp).Pointer() - mapIntIntfTypId = reflect.ValueOf(mapIntIntfTyp).Pointer() - mapInt64IntfTypId = reflect.ValueOf(mapInt64IntfTyp).Pointer() - mapUintIntfTypId = reflect.ValueOf(mapUintIntfTyp).Pointer() - mapUint64IntfTypId = reflect.ValueOf(mapUint64IntfTyp).Pointer() - // Id = reflect.ValueOf().Pointer() - // mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer() - - binaryMarshalerTypId = reflect.ValueOf(binaryMarshalerTyp).Pointer() - binaryUnmarshalerTypId = reflect.ValueOf(binaryUnmarshalerTyp).Pointer() - - intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits()) - uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits()) - - bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} - bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} -) - -type binaryUnmarshaler interface { - UnmarshalBinary(data []byte) error -} - -type binaryMarshaler interface { - MarshalBinary() (data []byte, err error) -} - -// MapBySlice represents a slice which should be encoded as a map in the stream. -// The slice contains a sequence of key-value pairs. -type MapBySlice interface { - MapBySlice() -} - -// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED. -// -// BasicHandle encapsulates the common options and extension functions. -type BasicHandle struct { - extHandle - EncodeOptions - DecodeOptions -} - -// Handle is the interface for a specific encoding format. -// -// Typically, a Handle is pre-configured before first time use, -// and not modified while in use. Such a pre-configured Handle -// is safe for concurrent access. -type Handle interface { - writeExt() bool - getBasicHandle() *BasicHandle - newEncDriver(w encWriter) encDriver - newDecDriver(r decReader) decDriver -} - -// RawExt represents raw unprocessed extension data. -type RawExt struct { - Tag byte - Data []byte -} - -type extTypeTagFn struct { - rtid uintptr - rt reflect.Type - tag byte - encFn func(reflect.Value) ([]byte, error) - decFn func(reflect.Value, []byte) error -} - -type extHandle []*extTypeTagFn - -// AddExt registers an encode and decode function for a reflect.Type. 
-// Note that the type must be a named type, and specifically not -// a pointer or Interface. An error is returned if that is not honored. -// -// To Deregister an ext, call AddExt with 0 tag, nil encfn and nil decfn. -func (o *extHandle) AddExt( - rt reflect.Type, - tag byte, - encfn func(reflect.Value) ([]byte, error), - decfn func(reflect.Value, []byte) error, -) (err error) { - // o is a pointer, because we may need to initialize it - if rt.PkgPath() == "" || rt.Kind() == reflect.Interface { - err = fmt.Errorf("codec.Handle.AddExt: Takes named type, especially not a pointer or interface: %T", - reflect.Zero(rt).Interface()) - return - } - - // o cannot be nil, since it is always embedded in a Handle. - // if nil, let it panic. - // if o == nil { - // err = errors.New("codec.Handle.AddExt: extHandle cannot be a nil pointer.") - // return - // } - - rtid := reflect.ValueOf(rt).Pointer() - for _, v := range *o { - if v.rtid == rtid { - v.tag, v.encFn, v.decFn = tag, encfn, decfn - return - } - } - - *o = append(*o, &extTypeTagFn{rtid, rt, tag, encfn, decfn}) - return -} - -func (o extHandle) getExt(rtid uintptr) *extTypeTagFn { - for _, v := range o { - if v.rtid == rtid { - return v - } - } - return nil -} - -func (o extHandle) getExtForTag(tag byte) *extTypeTagFn { - for _, v := range o { - if v.tag == tag { - return v - } - } - return nil -} - -func (o extHandle) getDecodeExtForTag(tag byte) ( - rv reflect.Value, fn func(reflect.Value, []byte) error) { - if x := o.getExtForTag(tag); x != nil { - // ext is only registered for base - rv = reflect.New(x.rt).Elem() - fn = x.decFn - } - return -} - -func (o extHandle) getDecodeExt(rtid uintptr) (tag byte, fn func(reflect.Value, []byte) error) { - if x := o.getExt(rtid); x != nil { - tag = x.tag - fn = x.decFn - } - return -} - -func (o extHandle) getEncodeExt(rtid uintptr) (tag byte, fn func(reflect.Value) ([]byte, error)) { - if x := o.getExt(rtid); x != nil { - tag = x.tag - fn = x.encFn - } - return -} - -type structFieldInfo struct { - encName string // encode name - - // only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set. - - is []int // (recursive/embedded) field index in struct - i int16 // field index in struct - omitEmpty bool - toArray bool // if field is _struct, is the toArray set? - - // tag string // tag - // name string // field name - // encNameBs []byte // encoded name as byte stream - // ikind int // kind of the field as an int i.e. int(reflect.Kind) -} - -func parseStructFieldInfo(fname string, stag string) *structFieldInfo { - if fname == "" { - panic("parseStructFieldInfo: No Field Name") - } - si := structFieldInfo{ - // name: fname, - encName: fname, - // tag: stag, - } - - if stag != "" { - for i, s := range strings.Split(stag, ",") { - if i == 0 { - if s != "" { - si.encName = s - } - } else { - switch s { - case "omitempty": - si.omitEmpty = true - case "toarray": - si.toArray = true - } - } - } - } - // si.encNameBs = []byte(si.encName) - return &si -} - -type sfiSortedByEncName []*structFieldInfo - -func (p sfiSortedByEncName) Len() int { - return len(p) -} - -func (p sfiSortedByEncName) Less(i, j int) bool { - return p[i].encName < p[j].encName -} - -func (p sfiSortedByEncName) Swap(i, j int) { - p[i], p[j] = p[j], p[i] -} - -// typeInfo keeps information about each type referenced in the encode/decode sequence. 
-// -// During an encode/decode sequence, we work as below: -// - If base is a built in type, en/decode base value -// - If base is registered as an extension, en/decode base value -// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method -// - Else decode appropriately based on the reflect.Kind -type typeInfo struct { - sfi []*structFieldInfo // sorted. Used when enc/dec struct to map. - sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array. - - rt reflect.Type - rtid uintptr - - // baseId gives pointer to the base reflect.Type, after deferencing - // the pointers. E.g. base type of ***time.Time is time.Time. - base reflect.Type - baseId uintptr - baseIndir int8 // number of indirections to get to base - - mbs bool // base type (T or *T) is a MapBySlice - - m bool // base type (T or *T) is a binaryMarshaler - unm bool // base type (T or *T) is a binaryUnmarshaler - mIndir int8 // number of indirections to get to binaryMarshaler type - unmIndir int8 // number of indirections to get to binaryUnmarshaler type - toArray bool // whether this (struct) type should be encoded as an array -} - -func (ti *typeInfo) indexForEncName(name string) int { - //tisfi := ti.sfi - const binarySearchThreshold = 16 - if sfilen := len(ti.sfi); sfilen < binarySearchThreshold { - // linear search. faster than binary search in my testing up to 16-field structs. - for i, si := range ti.sfi { - if si.encName == name { - return i - } - } - } else { - // binary search. adapted from sort/search.go. - h, i, j := 0, 0, sfilen - for i < j { - h = i + (j-i)/2 - if ti.sfi[h].encName < name { - i = h + 1 - } else { - j = h - } - } - if i < sfilen && ti.sfi[i].encName == name { - return i - } - } - return -1 -} - -func getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { - var ok bool - cachedTypeInfoMutex.RLock() - pti, ok = cachedTypeInfo[rtid] - cachedTypeInfoMutex.RUnlock() - if ok { - return - } - - cachedTypeInfoMutex.Lock() - defer cachedTypeInfoMutex.Unlock() - if pti, ok = cachedTypeInfo[rtid]; ok { - return - } - - ti := typeInfo{rt: rt, rtid: rtid} - pti = &ti - - var indir int8 - if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok { - ti.m, ti.mIndir = true, indir - } - if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok { - ti.unm, ti.unmIndir = true, indir - } - if ok, _ = implementsIntf(rt, mapBySliceTyp); ok { - ti.mbs = true - } - - pt := rt - var ptIndir int8 - // for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { } - for pt.Kind() == reflect.Ptr { - pt = pt.Elem() - ptIndir++ - } - if ptIndir == 0 { - ti.base = rt - ti.baseId = rtid - } else { - ti.base = pt - ti.baseId = reflect.ValueOf(pt).Pointer() - ti.baseIndir = ptIndir - } - - if rt.Kind() == reflect.Struct { - var siInfo *structFieldInfo - if f, ok := rt.FieldByName(structInfoFieldName); ok { - siInfo = parseStructFieldInfo(structInfoFieldName, f.Tag.Get(structTagName)) - ti.toArray = siInfo.toArray - } - sfip := make([]*structFieldInfo, 0, rt.NumField()) - rgetTypeInfo(rt, nil, make(map[string]bool), &sfip, siInfo) - - // // try to put all si close together - // const tryToPutAllStructFieldInfoTogether = true - // if tryToPutAllStructFieldInfoTogether { - // sfip2 := make([]structFieldInfo, len(sfip)) - // for i, si := range sfip { - // sfip2[i] = *si - // } - // for i := range sfip { - // sfip[i] = &sfip2[i] - // } - // } - - ti.sfip = make([]*structFieldInfo, len(sfip)) - ti.sfi = make([]*structFieldInfo, len(sfip)) - copy(ti.sfip, sfip) - sort.Sort(sfiSortedByEncName(sfip)) - 
copy(ti.sfi, sfip) - } - // sfi = sfip - cachedTypeInfo[rtid] = pti - return -} - -func rgetTypeInfo(rt reflect.Type, indexstack []int, fnameToHastag map[string]bool, - sfi *[]*structFieldInfo, siInfo *structFieldInfo, -) { - // for rt.Kind() == reflect.Ptr { - // // indexstack = append(indexstack, 0) - // rt = rt.Elem() - // } - for j := 0; j < rt.NumField(); j++ { - f := rt.Field(j) - stag := f.Tag.Get(structTagName) - if stag == "-" { - continue - } - if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) { - continue - } - // if anonymous and there is no struct tag and its a struct (or pointer to struct), inline it. - if f.Anonymous && stag == "" { - ft := f.Type - for ft.Kind() == reflect.Ptr { - ft = ft.Elem() - } - if ft.Kind() == reflect.Struct { - indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) - rgetTypeInfo(ft, indexstack2, fnameToHastag, sfi, siInfo) - continue - } - } - // do not let fields with same name in embedded structs override field at higher level. - // this must be done after anonymous check, to allow anonymous field - // still include their child fields - if _, ok := fnameToHastag[f.Name]; ok { - continue - } - si := parseStructFieldInfo(f.Name, stag) - // si.ikind = int(f.Type.Kind()) - if len(indexstack) == 0 { - si.i = int16(j) - } else { - si.i = -1 - si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) - } - - if siInfo != nil { - if siInfo.omitEmpty { - si.omitEmpty = true - } - } - *sfi = append(*sfi, si) - fnameToHastag[f.Name] = stag != "" - } -} - -func panicToErr(err *error) { - if recoverPanicToErr { - if x := recover(); x != nil { - //debug.PrintStack() - panicValToErr(x, err) - } - } -} - -func doPanic(tag string, format string, params ...interface{}) { - params2 := make([]interface{}, len(params)+1) - params2[0] = tag - copy(params2[1:], params) - panic(fmt.Errorf("%s: "+format, params2...)) -} - -func checkOverflowFloat32(f float64, doCheck bool) { - if !doCheck { - return - } - // check overflow (logic adapted from std pkg reflect/value.go OverflowFloat() - f2 := f - if f2 < 0 { - f2 = -f - } - if math.MaxFloat32 < f2 && f2 <= math.MaxFloat64 { - decErr("Overflow float32 value: %v", f2) - } -} - -func checkOverflow(ui uint64, i int64, bitsize uint8) { - // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() - if bitsize == 0 { - return - } - if i != 0 { - if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc { - decErr("Overflow int value: %v", i) - } - } - if ui != 0 { - if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { - decErr("Overflow uint value: %v", ui) - } - } -} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go b/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go deleted file mode 100644 index 93f12854f21ae..0000000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -// All non-std package dependencies live in this file, -// so porting to different environment is easy (just update functions). 
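One helper worth pausing on is `checkOverflow` above: it tests whether a value fits in a narrower integer by shifting the high bits out and back and comparing. A standalone sketch of the signed case; `fits` is an illustrative name.

```go
package main

import "fmt"

// fits reports whether i is representable in a signed integer of the given
// bit size, using the same shift-and-compare trick as checkOverflow.
func fits(i int64, bitsize uint8) bool {
	trunc := (i << (64 - bitsize)) >> (64 - bitsize) // sign-extends back
	return i == trunc
}

func main() {
	fmt.Println(fits(127, 8))  // true: fits in int8
	fmt.Println(fits(128, 8))  // false: overflows int8
	fmt.Println(fits(-128, 8)) // true
	fmt.Println(fits(-129, 8)) // false
}
```

Because the right shift on a signed int64 sign-extends, the round trip reproduces the original value exactly when, and only when, it is representable in `bitsize` bits.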
- -import ( - "errors" - "fmt" - "math" - "reflect" -) - -var ( - raisePanicAfterRecover = false - debugging = true -) - -func panicValToErr(panicVal interface{}, err *error) { - switch xerr := panicVal.(type) { - case error: - *err = xerr - case string: - *err = errors.New(xerr) - default: - *err = fmt.Errorf("%v", panicVal) - } - if raisePanicAfterRecover { - panic(panicVal) - } - return -} - -func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool { - switch v.Kind() { - case reflect.Invalid: - return true - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - if deref { - if v.IsNil() { - return true - } - return hIsEmptyValue(v.Elem(), deref, checkStruct) - } else { - return v.IsNil() - } - case reflect.Struct: - if !checkStruct { - return false - } - // return true if all fields are empty. else return false. - - // we cannot use equality check, because some fields may be maps/slices/etc - // and consequently the structs are not comparable. - // return v.Interface() == reflect.Zero(v.Type()).Interface() - for i, n := 0, v.NumField(); i < n; i++ { - if !hIsEmptyValue(v.Field(i), deref, checkStruct) { - return false - } - } - return true - } - return false -} - -func isEmptyValue(v reflect.Value) bool { - return hIsEmptyValue(v, derefForIsEmptyValue, checkStructForEmptyValue) -} - -func debugf(format string, args ...interface{}) { - if debugging { - if len(format) == 0 || format[len(format)-1] != '\n' { - format = format + "\n" - } - fmt.Printf(format, args...) - } -} - -func pruneSignExt(v []byte, pos bool) (n int) { - if len(v) < 2 { - } else if pos && v[0] == 0 { - for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ { - } - } else if !pos && v[0] == 0xff { - for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ { - } - } - return -} - -func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) { - if typ == nil { - return - } - rt := typ - // The type might be a pointer and we need to keep - // dereferencing to the base type until we find an implementation. - for { - if rt.Implements(iTyp) { - return true, indir - } - if p := rt; p.Kind() == reflect.Ptr { - indir++ - if indir >= math.MaxInt8 { // insane number of indirections - return false, 0 - } - rt = p.Elem() - continue - } - break - } - // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy. - if typ.Kind() != reflect.Ptr { - // Not a pointer, but does the pointer work? - if reflect.PtrTo(typ).Implements(iTyp) { - return true, -1 - } - } - return false, 0 -} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go b/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go deleted file mode 100644 index da0500d19223b..0000000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go +++ /dev/null @@ -1,816 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -/* -MSGPACK - -Msgpack-c implementation powers the c, c++, python, ruby, etc libraries. 
-We need to maintain compatibility with it and how it encodes integer values -without caring about the type. - -For compatibility with behaviour of msgpack-c reference implementation: - - Go intX (>0) and uintX - IS ENCODED AS - msgpack +ve fixnum, unsigned - - Go intX (<0) - IS ENCODED AS - msgpack -ve fixnum, signed - -*/ -package codec - -import ( - "fmt" - "io" - "math" - "net/rpc" -) - -const ( - mpPosFixNumMin byte = 0x00 - mpPosFixNumMax = 0x7f - mpFixMapMin = 0x80 - mpFixMapMax = 0x8f - mpFixArrayMin = 0x90 - mpFixArrayMax = 0x9f - mpFixStrMin = 0xa0 - mpFixStrMax = 0xbf - mpNil = 0xc0 - _ = 0xc1 - mpFalse = 0xc2 - mpTrue = 0xc3 - mpFloat = 0xca - mpDouble = 0xcb - mpUint8 = 0xcc - mpUint16 = 0xcd - mpUint32 = 0xce - mpUint64 = 0xcf - mpInt8 = 0xd0 - mpInt16 = 0xd1 - mpInt32 = 0xd2 - mpInt64 = 0xd3 - - // extensions below - mpBin8 = 0xc4 - mpBin16 = 0xc5 - mpBin32 = 0xc6 - mpExt8 = 0xc7 - mpExt16 = 0xc8 - mpExt32 = 0xc9 - mpFixExt1 = 0xd4 - mpFixExt2 = 0xd5 - mpFixExt4 = 0xd6 - mpFixExt8 = 0xd7 - mpFixExt16 = 0xd8 - - mpStr8 = 0xd9 // new - mpStr16 = 0xda - mpStr32 = 0xdb - - mpArray16 = 0xdc - mpArray32 = 0xdd - - mpMap16 = 0xde - mpMap32 = 0xdf - - mpNegFixNumMin = 0xe0 - mpNegFixNumMax = 0xff -) - -// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec -// that the backend RPC service takes multiple arguments, which have been arranged -// in sequence in the slice. -// -// The Codec then passes it AS-IS to the rpc service (without wrapping it in an -// array of 1 element). -type MsgpackSpecRpcMultiArgs []interface{} - -// A MsgpackContainer type specifies the different types of msgpackContainers. -type msgpackContainerType struct { - fixCutoff int - bFixMin, b8, b16, b32 byte - hasFixMin, has8, has8Always bool -} - -var ( - msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false} - msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true} - msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false} - msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false} -) - -//--------------------------------------------- - -type msgpackEncDriver struct { - w encWriter - h *MsgpackHandle -} - -func (e *msgpackEncDriver) isBuiltinType(rt uintptr) bool { - //no builtin types. All encodings are based on kinds. Types supported as extensions. 
- return false -} - -func (e *msgpackEncDriver) encodeBuiltin(rt uintptr, v interface{}) {} - -func (e *msgpackEncDriver) encodeNil() { - e.w.writen1(mpNil) -} - -func (e *msgpackEncDriver) encodeInt(i int64) { - - switch { - case i >= 0: - e.encodeUint(uint64(i)) - case i >= -32: - e.w.writen1(byte(i)) - case i >= math.MinInt8: - e.w.writen2(mpInt8, byte(i)) - case i >= math.MinInt16: - e.w.writen1(mpInt16) - e.w.writeUint16(uint16(i)) - case i >= math.MinInt32: - e.w.writen1(mpInt32) - e.w.writeUint32(uint32(i)) - default: - e.w.writen1(mpInt64) - e.w.writeUint64(uint64(i)) - } -} - -func (e *msgpackEncDriver) encodeUint(i uint64) { - switch { - case i <= math.MaxInt8: - e.w.writen1(byte(i)) - case i <= math.MaxUint8: - e.w.writen2(mpUint8, byte(i)) - case i <= math.MaxUint16: - e.w.writen1(mpUint16) - e.w.writeUint16(uint16(i)) - case i <= math.MaxUint32: - e.w.writen1(mpUint32) - e.w.writeUint32(uint32(i)) - default: - e.w.writen1(mpUint64) - e.w.writeUint64(uint64(i)) - } -} - -func (e *msgpackEncDriver) encodeBool(b bool) { - if b { - e.w.writen1(mpTrue) - } else { - e.w.writen1(mpFalse) - } -} - -func (e *msgpackEncDriver) encodeFloat32(f float32) { - e.w.writen1(mpFloat) - e.w.writeUint32(math.Float32bits(f)) -} - -func (e *msgpackEncDriver) encodeFloat64(f float64) { - e.w.writen1(mpDouble) - e.w.writeUint64(math.Float64bits(f)) -} - -func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) { - switch { - case l == 1: - e.w.writen2(mpFixExt1, xtag) - case l == 2: - e.w.writen2(mpFixExt2, xtag) - case l == 4: - e.w.writen2(mpFixExt4, xtag) - case l == 8: - e.w.writen2(mpFixExt8, xtag) - case l == 16: - e.w.writen2(mpFixExt16, xtag) - case l < 256: - e.w.writen2(mpExt8, byte(l)) - e.w.writen1(xtag) - case l < 65536: - e.w.writen1(mpExt16) - e.w.writeUint16(uint16(l)) - e.w.writen1(xtag) - default: - e.w.writen1(mpExt32) - e.w.writeUint32(uint32(l)) - e.w.writen1(xtag) - } -} - -func (e *msgpackEncDriver) encodeArrayPreamble(length int) { - e.writeContainerLen(msgpackContainerList, length) -} - -func (e *msgpackEncDriver) encodeMapPreamble(length int) { - e.writeContainerLen(msgpackContainerMap, length) -} - -func (e *msgpackEncDriver) encodeString(c charEncoding, s string) { - if c == c_RAW && e.h.WriteExt { - e.writeContainerLen(msgpackContainerBin, len(s)) - } else { - e.writeContainerLen(msgpackContainerStr, len(s)) - } - if len(s) > 0 { - e.w.writestr(s) - } -} - -func (e *msgpackEncDriver) encodeSymbol(v string) { - e.encodeString(c_UTF8, v) -} - -func (e *msgpackEncDriver) encodeStringBytes(c charEncoding, bs []byte) { - if c == c_RAW && e.h.WriteExt { - e.writeContainerLen(msgpackContainerBin, len(bs)) - } else { - e.writeContainerLen(msgpackContainerStr, len(bs)) - } - if len(bs) > 0 { - e.w.writeb(bs) - } -} - -func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) { - switch { - case ct.hasFixMin && l < ct.fixCutoff: - e.w.writen1(ct.bFixMin | byte(l)) - case ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt): - e.w.writen2(ct.b8, uint8(l)) - case l < 65536: - e.w.writen1(ct.b16) - e.w.writeUint16(uint16(l)) - default: - e.w.writen1(ct.b32) - e.w.writeUint32(uint32(l)) - } -} - -//--------------------------------------------- - -type msgpackDecDriver struct { - r decReader - h *MsgpackHandle - bd byte - bdRead bool - bdType valueType -} - -func (d *msgpackDecDriver) isBuiltinType(rt uintptr) bool { - //no builtin types. All encodings are based on kinds. Types supported as extensions. 
- return false -} - -func (d *msgpackDecDriver) decodeBuiltin(rt uintptr, v interface{}) {} - -// Note: This returns either a primitive (int, bool, etc) for non-containers, -// or a containerType, or a specific type denoting nil or extension. -// It is called when a nil interface{} is passed, leaving it up to the DecDriver -// to introspect the stream and decide how best to decode. -// It deciphers the value by looking at the stream first. -func (d *msgpackDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { - d.initReadNext() - bd := d.bd - - switch bd { - case mpNil: - vt = valueTypeNil - d.bdRead = false - case mpFalse: - vt = valueTypeBool - v = false - case mpTrue: - vt = valueTypeBool - v = true - - case mpFloat: - vt = valueTypeFloat - v = float64(math.Float32frombits(d.r.readUint32())) - case mpDouble: - vt = valueTypeFloat - v = math.Float64frombits(d.r.readUint64()) - - case mpUint8: - vt = valueTypeUint - v = uint64(d.r.readn1()) - case mpUint16: - vt = valueTypeUint - v = uint64(d.r.readUint16()) - case mpUint32: - vt = valueTypeUint - v = uint64(d.r.readUint32()) - case mpUint64: - vt = valueTypeUint - v = uint64(d.r.readUint64()) - - case mpInt8: - vt = valueTypeInt - v = int64(int8(d.r.readn1())) - case mpInt16: - vt = valueTypeInt - v = int64(int16(d.r.readUint16())) - case mpInt32: - vt = valueTypeInt - v = int64(int32(d.r.readUint32())) - case mpInt64: - vt = valueTypeInt - v = int64(int64(d.r.readUint64())) - - default: - switch { - case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: - // positive fixnum (always signed) - vt = valueTypeInt - v = int64(int8(bd)) - case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: - // negative fixnum - vt = valueTypeInt - v = int64(int8(bd)) - case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: - if d.h.RawToString { - var rvm string - vt = valueTypeString - v = &rvm - } else { - var rvm = []byte{} - vt = valueTypeBytes - v = &rvm - } - decodeFurther = true - case bd == mpBin8, bd == mpBin16, bd == mpBin32: - var rvm = []byte{} - vt = valueTypeBytes - v = &rvm - decodeFurther = true - case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: - vt = valueTypeArray - decodeFurther = true - case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: - vt = valueTypeMap - decodeFurther = true - case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: - clen := d.readExtLen() - var re RawExt - re.Tag = d.r.readn1() - re.Data = d.r.readn(clen) - v = &re - vt = valueTypeExt - default: - decErr("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) - } - } - if !decodeFurther { - d.bdRead = false - } - return -} - -// int can be decoded from msgpack type: intXXX or uintXXX -func (d *msgpackDecDriver) decodeInt(bitsize uint8) (i int64) { - switch d.bd { - case mpUint8: - i = int64(uint64(d.r.readn1())) - case mpUint16: - i = int64(uint64(d.r.readUint16())) - case mpUint32: - i = int64(uint64(d.r.readUint32())) - case mpUint64: - i = int64(d.r.readUint64()) - case mpInt8: - i = int64(int8(d.r.readn1())) - case mpInt16: - i = int64(int16(d.r.readUint16())) - case mpInt32: - i = int64(int32(d.r.readUint32())) - case mpInt64: - i = int64(d.r.readUint64()) - default: - switch { - case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: - i = int64(int8(d.bd)) - case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: - i = int64(int8(d.bd)) - default: - decErr("Unhandled single-byte unsigned integer value: %s: %x", 
msgBadDesc, d.bd) - } - } - // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() - if bitsize > 0 { - if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc { - decErr("Overflow int value: %v", i) - } - } - d.bdRead = false - return -} - -// uint can be decoded from msgpack type: intXXX or uintXXX -func (d *msgpackDecDriver) decodeUint(bitsize uint8) (ui uint64) { - switch d.bd { - case mpUint8: - ui = uint64(d.r.readn1()) - case mpUint16: - ui = uint64(d.r.readUint16()) - case mpUint32: - ui = uint64(d.r.readUint32()) - case mpUint64: - ui = d.r.readUint64() - case mpInt8: - if i := int64(int8(d.r.readn1())); i >= 0 { - ui = uint64(i) - } else { - decErr("Assigning negative signed value: %v, to unsigned type", i) - } - case mpInt16: - if i := int64(int16(d.r.readUint16())); i >= 0 { - ui = uint64(i) - } else { - decErr("Assigning negative signed value: %v, to unsigned type", i) - } - case mpInt32: - if i := int64(int32(d.r.readUint32())); i >= 0 { - ui = uint64(i) - } else { - decErr("Assigning negative signed value: %v, to unsigned type", i) - } - case mpInt64: - if i := int64(d.r.readUint64()); i >= 0 { - ui = uint64(i) - } else { - decErr("Assigning negative signed value: %v, to unsigned type", i) - } - default: - switch { - case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: - ui = uint64(d.bd) - case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: - decErr("Assigning negative signed value: %v, to unsigned type", int(d.bd)) - default: - decErr("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) - } - } - // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() - if bitsize > 0 { - if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { - decErr("Overflow uint value: %v", ui) - } - } - d.bdRead = false - return -} - -// float can either be decoded from msgpack type: float, double or intX -func (d *msgpackDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { - switch d.bd { - case mpFloat: - f = float64(math.Float32frombits(d.r.readUint32())) - case mpDouble: - f = math.Float64frombits(d.r.readUint64()) - default: - f = float64(d.decodeInt(0)) - } - checkOverflowFloat32(f, chkOverflow32) - d.bdRead = false - return -} - -// bool can be decoded from bool, fixnum 0 or 1. -func (d *msgpackDecDriver) decodeBool() (b bool) { - switch d.bd { - case mpFalse, 0: - // b = false - case mpTrue, 1: - b = true - default: - decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) - } - d.bdRead = false - return -} - -func (d *msgpackDecDriver) decodeString() (s string) { - clen := d.readContainerLen(msgpackContainerStr) - if clen > 0 { - s = string(d.r.readn(clen)) - } - d.bdRead = false - return -} - -// Callers must check if changed=true (to decide whether to replace the one they have) -func (d *msgpackDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { - // bytes can be decoded from msgpackContainerStr or msgpackContainerBin - var clen int - switch d.bd { - case mpBin8, mpBin16, mpBin32: - clen = d.readContainerLen(msgpackContainerBin) - default: - clen = d.readContainerLen(msgpackContainerStr) - } - // if clen < 0 { - // changed = true - // panic("length cannot be zero. 
this cannot be nil.") - // } - if clen > 0 { - // if no contents in stream, don't update the passed byteslice - if len(bs) != clen { - // Return changed=true if length of passed slice diff from length of bytes in stream - if len(bs) > clen { - bs = bs[:clen] - } else { - bs = make([]byte, clen) - } - bsOut = bs - changed = true - } - d.r.readb(bs) - } - d.bdRead = false - return -} - -// Every top-level decode funcs (i.e. decodeValue, decode) must call this first. -func (d *msgpackDecDriver) initReadNext() { - if d.bdRead { - return - } - d.bd = d.r.readn1() - d.bdRead = true - d.bdType = valueTypeUnset -} - -func (d *msgpackDecDriver) currentEncodedType() valueType { - if d.bdType == valueTypeUnset { - bd := d.bd - switch bd { - case mpNil: - d.bdType = valueTypeNil - case mpFalse, mpTrue: - d.bdType = valueTypeBool - case mpFloat, mpDouble: - d.bdType = valueTypeFloat - case mpUint8, mpUint16, mpUint32, mpUint64: - d.bdType = valueTypeUint - case mpInt8, mpInt16, mpInt32, mpInt64: - d.bdType = valueTypeInt - default: - switch { - case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: - d.bdType = valueTypeInt - case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: - d.bdType = valueTypeInt - case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: - if d.h.RawToString { - d.bdType = valueTypeString - } else { - d.bdType = valueTypeBytes - } - case bd == mpBin8, bd == mpBin16, bd == mpBin32: - d.bdType = valueTypeBytes - case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: - d.bdType = valueTypeArray - case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: - d.bdType = valueTypeMap - case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: - d.bdType = valueTypeExt - default: - decErr("currentEncodedType: Undeciphered descriptor: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) - } - } - } - return d.bdType -} - -func (d *msgpackDecDriver) tryDecodeAsNil() bool { - if d.bd == mpNil { - d.bdRead = false - return true - } - return false -} - -func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) { - bd := d.bd - switch { - case bd == mpNil: - clen = -1 // to represent nil - case bd == ct.b8: - clen = int(d.r.readn1()) - case bd == ct.b16: - clen = int(d.r.readUint16()) - case bd == ct.b32: - clen = int(d.r.readUint32()) - case (ct.bFixMin & bd) == ct.bFixMin: - clen = int(ct.bFixMin ^ bd) - default: - decErr("readContainerLen: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) - } - d.bdRead = false - return -} - -func (d *msgpackDecDriver) readMapLen() int { - return d.readContainerLen(msgpackContainerMap) -} - -func (d *msgpackDecDriver) readArrayLen() int { - return d.readContainerLen(msgpackContainerList) -} - -func (d *msgpackDecDriver) readExtLen() (clen int) { - switch d.bd { - case mpNil: - clen = -1 // to represent nil - case mpFixExt1: - clen = 1 - case mpFixExt2: - clen = 2 - case mpFixExt4: - clen = 4 - case mpFixExt8: - clen = 8 - case mpFixExt16: - clen = 16 - case mpExt8: - clen = int(d.r.readn1()) - case mpExt16: - clen = int(d.r.readUint16()) - case mpExt32: - clen = int(d.r.readUint32()) - default: - decErr("decoding ext bytes: found unexpected byte: %x", d.bd) - } - return -} - -func (d *msgpackDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - xbd := d.bd - switch { - case xbd == mpBin8, xbd == mpBin16, xbd == mpBin32: - xbs, _ = d.decodeBytes(nil) - case xbd == mpStr8, xbd == mpStr16, xbd == mpStr32, - xbd >= mpFixStrMin && xbd <= mpFixStrMax: - 
xbs = []byte(d.decodeString()) - default: - clen := d.readExtLen() - xtag = d.r.readn1() - if verifyTag && xtag != tag { - decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) - } - xbs = d.r.readn(clen) - } - d.bdRead = false - return -} - -//-------------------------------------------------- - -//MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format. -type MsgpackHandle struct { - BasicHandle - - // RawToString controls how raw bytes are decoded into a nil interface{}. - RawToString bool - // WriteExt flag supports encoding configured extensions with extension tags. - // It also controls whether other elements of the new spec are encoded (ie Str8). - // - // With WriteExt=false, configured extensions are serialized as raw bytes - // and Str8 is not encoded. - // - // A stream can still be decoded into a typed value, provided an appropriate value - // is provided, but the type cannot be inferred from the stream. If no appropriate - // type is provided (e.g. decoding into a nil interface{}), you get back - // a []byte or string based on the setting of RawToString. - WriteExt bool -} - -func (h *MsgpackHandle) newEncDriver(w encWriter) encDriver { - return &msgpackEncDriver{w: w, h: h} -} - -func (h *MsgpackHandle) newDecDriver(r decReader) decDriver { - return &msgpackDecDriver{r: r, h: h} -} - -func (h *MsgpackHandle) writeExt() bool { - return h.WriteExt -} - -func (h *MsgpackHandle) getBasicHandle() *BasicHandle { - return &h.BasicHandle -} - -//-------------------------------------------------- - -type msgpackSpecRpcCodec struct { - rpcCodec -} - -// /////////////// Spec RPC Codec /////////////////// -func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { - // WriteRequest can write to both a Go service, and other services that do - // not abide by the 1 argument rule of a Go service. - // We discriminate based on if the body is a MsgpackSpecRpcMultiArgs - var bodyArr []interface{} - if m, ok := body.(MsgpackSpecRpcMultiArgs); ok { - bodyArr = ([]interface{})(m) - } else { - bodyArr = []interface{}{body} - } - r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr} - return c.write(r2, nil, false, true) -} - -func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { - var moe interface{} - if r.Error != "" { - moe = r.Error - } - if moe != nil && body != nil { - body = nil - } - r2 := []interface{}{1, uint32(r.Seq), moe, body} - return c.write(r2, nil, false, true) -} - -func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error { - return c.parseCustomHeader(1, &r.Seq, &r.Error) -} - -func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error { - return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod) -} - -func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error { - if body == nil { // read and discard - return c.read(nil) - } - bodyArr := []interface{}{body} - return c.read(&bodyArr) -} - -func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) { - - if c.cls { - return io.EOF - } - - // We read the response header by hand - // so that the body can be decoded on its own from the stream at a later time. - - const fia byte = 0x94 //four item array descriptor value - // Not sure why the panic of EOF is swallowed above. - // if bs1 := c.dec.r.readn1(); bs1 != fia { - // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. 
Received %v", fia, bs1) - // return - // } - var b byte - b, err = c.br.ReadByte() - if err != nil { - return - } - if b != fia { - err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b) - return - } - - if err = c.read(&b); err != nil { - return - } - if b != expectTypeByte { - err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. Received %v", expectTypeByte, b) - return - } - if err = c.read(msgid); err != nil { - return - } - if err = c.read(methodOrError); err != nil { - return - } - return -} - -//-------------------------------------------------- - -// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol -// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md -type msgpackSpecRpc struct{} - -// MsgpackSpecRpc implements Rpc using the communication protocol defined in -// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md . -// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. -var MsgpackSpecRpc msgpackSpecRpc - -func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { - return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} -} - -func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { - return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} -} - -var _ decDriver = (*msgpackDecDriver)(nil) -var _ encDriver = (*msgpackEncDriver)(nil) diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go b/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go deleted file mode 100644 index d014dbdcc7d0a..0000000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -import ( - "bufio" - "io" - "net/rpc" - "sync" -) - -// Rpc provides a rpc Server or Client Codec for rpc communication. -type Rpc interface { - ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec - ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec -} - -// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer -// used by the rpc connection. It accomodates use-cases where the connection -// should be used by rpc and non-rpc functions, e.g. streaming a file after -// sending an rpc response. -type RpcCodecBuffered interface { - BufferedReader() *bufio.Reader - BufferedWriter() *bufio.Writer -} - -// ------------------------------------- - -// rpcCodec defines the struct members and common methods. 
-type rpcCodec struct { - rwc io.ReadWriteCloser - dec *Decoder - enc *Encoder - bw *bufio.Writer - br *bufio.Reader - mu sync.Mutex - cls bool -} - -func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec { - bw := bufio.NewWriter(conn) - br := bufio.NewReader(conn) - return rpcCodec{ - rwc: conn, - bw: bw, - br: br, - enc: NewEncoder(bw, h), - dec: NewDecoder(br, h), - } -} - -func (c *rpcCodec) BufferedReader() *bufio.Reader { - return c.br -} - -func (c *rpcCodec) BufferedWriter() *bufio.Writer { - return c.bw -} - -func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) { - if c.cls { - return io.EOF - } - if err = c.enc.Encode(obj1); err != nil { - return - } - if writeObj2 { - if err = c.enc.Encode(obj2); err != nil { - return - } - } - if doFlush && c.bw != nil { - return c.bw.Flush() - } - return -} - -func (c *rpcCodec) read(obj interface{}) (err error) { - if c.cls { - return io.EOF - } - //If nil is passed in, we should still attempt to read content to nowhere. - if obj == nil { - var obj2 interface{} - return c.dec.Decode(&obj2) - } - return c.dec.Decode(obj) -} - -func (c *rpcCodec) Close() error { - if c.cls { - return io.EOF - } - c.cls = true - return c.rwc.Close() -} - -func (c *rpcCodec) ReadResponseBody(body interface{}) error { - return c.read(body) -} - -// ------------------------------------- - -type goRpcCodec struct { - rpcCodec -} - -func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { - // Must protect for concurrent access as per API - c.mu.Lock() - defer c.mu.Unlock() - return c.write(r, body, true, true) -} - -func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { - c.mu.Lock() - defer c.mu.Unlock() - return c.write(r, body, true, true) -} - -func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error { - return c.read(r) -} - -func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error { - return c.read(r) -} - -func (c *goRpcCodec) ReadRequestBody(body interface{}) error { - return c.read(body) -} - -// ------------------------------------- - -// goRpc is the implementation of Rpc that uses the communication protocol -// as defined in net/rpc package. -type goRpc struct{} - -// GoRpc implements Rpc using the communication protocol defined in net/rpc package. -// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. -var GoRpc goRpc - -func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { - return &goRpcCodec{newRPCCodec(conn, h)} -} - -func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { - return &goRpcCodec{newRPCCodec(conn, h)} -} - -var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/simple.go b/vendor/github.com/hashicorp/go-msgpack/codec/simple.go deleted file mode 100644 index 9e4d148a2a179..0000000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/simple.go +++ /dev/null @@ -1,461 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -import "math" - -const ( - _ uint8 = iota - simpleVdNil = 1 - simpleVdFalse = 2 - simpleVdTrue = 3 - simpleVdFloat32 = 4 - simpleVdFloat64 = 5 - - // each lasts for 4 (ie n, n+1, n+2, n+3) - simpleVdPosInt = 8 - simpleVdNegInt = 12 - - // containers: each lasts for 4 (ie n, n+1, n+2, ... 
n+7) - simpleVdString = 216 - simpleVdByteArray = 224 - simpleVdArray = 232 - simpleVdMap = 240 - simpleVdExt = 248 -) - -type simpleEncDriver struct { - h *SimpleHandle - w encWriter - //b [8]byte -} - -func (e *simpleEncDriver) isBuiltinType(rt uintptr) bool { - return false -} - -func (e *simpleEncDriver) encodeBuiltin(rt uintptr, v interface{}) { -} - -func (e *simpleEncDriver) encodeNil() { - e.w.writen1(simpleVdNil) -} - -func (e *simpleEncDriver) encodeBool(b bool) { - if b { - e.w.writen1(simpleVdTrue) - } else { - e.w.writen1(simpleVdFalse) - } -} - -func (e *simpleEncDriver) encodeFloat32(f float32) { - e.w.writen1(simpleVdFloat32) - e.w.writeUint32(math.Float32bits(f)) -} - -func (e *simpleEncDriver) encodeFloat64(f float64) { - e.w.writen1(simpleVdFloat64) - e.w.writeUint64(math.Float64bits(f)) -} - -func (e *simpleEncDriver) encodeInt(v int64) { - if v < 0 { - e.encUint(uint64(-v), simpleVdNegInt) - } else { - e.encUint(uint64(v), simpleVdPosInt) - } -} - -func (e *simpleEncDriver) encodeUint(v uint64) { - e.encUint(v, simpleVdPosInt) -} - -func (e *simpleEncDriver) encUint(v uint64, bd uint8) { - switch { - case v <= math.MaxUint8: - e.w.writen2(bd, uint8(v)) - case v <= math.MaxUint16: - e.w.writen1(bd + 1) - e.w.writeUint16(uint16(v)) - case v <= math.MaxUint32: - e.w.writen1(bd + 2) - e.w.writeUint32(uint32(v)) - case v <= math.MaxUint64: - e.w.writen1(bd + 3) - e.w.writeUint64(v) - } -} - -func (e *simpleEncDriver) encLen(bd byte, length int) { - switch { - case length == 0: - e.w.writen1(bd) - case length <= math.MaxUint8: - e.w.writen1(bd + 1) - e.w.writen1(uint8(length)) - case length <= math.MaxUint16: - e.w.writen1(bd + 2) - e.w.writeUint16(uint16(length)) - case int64(length) <= math.MaxUint32: - e.w.writen1(bd + 3) - e.w.writeUint32(uint32(length)) - default: - e.w.writen1(bd + 4) - e.w.writeUint64(uint64(length)) - } -} - -func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) { - e.encLen(simpleVdExt, length) - e.w.writen1(xtag) -} - -func (e *simpleEncDriver) encodeArrayPreamble(length int) { - e.encLen(simpleVdArray, length) -} - -func (e *simpleEncDriver) encodeMapPreamble(length int) { - e.encLen(simpleVdMap, length) -} - -func (e *simpleEncDriver) encodeString(c charEncoding, v string) { - e.encLen(simpleVdString, len(v)) - e.w.writestr(v) -} - -func (e *simpleEncDriver) encodeSymbol(v string) { - e.encodeString(c_UTF8, v) -} - -func (e *simpleEncDriver) encodeStringBytes(c charEncoding, v []byte) { - e.encLen(simpleVdByteArray, len(v)) - e.w.writeb(v) -} - -//------------------------------------ - -type simpleDecDriver struct { - h *SimpleHandle - r decReader - bdRead bool - bdType valueType - bd byte - //b [8]byte -} - -func (d *simpleDecDriver) initReadNext() { - if d.bdRead { - return - } - d.bd = d.r.readn1() - d.bdRead = true - d.bdType = valueTypeUnset -} - -func (d *simpleDecDriver) currentEncodedType() valueType { - if d.bdType == valueTypeUnset { - switch d.bd { - case simpleVdNil: - d.bdType = valueTypeNil - case simpleVdTrue, simpleVdFalse: - d.bdType = valueTypeBool - case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: - d.bdType = valueTypeUint - case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: - d.bdType = valueTypeInt - case simpleVdFloat32, simpleVdFloat64: - d.bdType = valueTypeFloat - case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: - d.bdType = valueTypeString - case simpleVdByteArray, simpleVdByteArray + 1, 
simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - d.bdType = valueTypeBytes - case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: - d.bdType = valueTypeExt - case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: - d.bdType = valueTypeArray - case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: - d.bdType = valueTypeMap - default: - decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.bd) - } - } - return d.bdType -} - -func (d *simpleDecDriver) tryDecodeAsNil() bool { - if d.bd == simpleVdNil { - d.bdRead = false - return true - } - return false -} - -func (d *simpleDecDriver) isBuiltinType(rt uintptr) bool { - return false -} - -func (d *simpleDecDriver) decodeBuiltin(rt uintptr, v interface{}) { -} - -func (d *simpleDecDriver) decIntAny() (ui uint64, i int64, neg bool) { - switch d.bd { - case simpleVdPosInt: - ui = uint64(d.r.readn1()) - i = int64(ui) - case simpleVdPosInt + 1: - ui = uint64(d.r.readUint16()) - i = int64(ui) - case simpleVdPosInt + 2: - ui = uint64(d.r.readUint32()) - i = int64(ui) - case simpleVdPosInt + 3: - ui = uint64(d.r.readUint64()) - i = int64(ui) - case simpleVdNegInt: - ui = uint64(d.r.readn1()) - i = -(int64(ui)) - neg = true - case simpleVdNegInt + 1: - ui = uint64(d.r.readUint16()) - i = -(int64(ui)) - neg = true - case simpleVdNegInt + 2: - ui = uint64(d.r.readUint32()) - i = -(int64(ui)) - neg = true - case simpleVdNegInt + 3: - ui = uint64(d.r.readUint64()) - i = -(int64(ui)) - neg = true - default: - decErr("decIntAny: Integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd) - } - // don't do this check, because callers may only want the unsigned value. - // if ui > math.MaxInt64 { - // decErr("decIntAny: Integer out of range for signed int64: %v", ui) - // } - return -} - -func (d *simpleDecDriver) decodeInt(bitsize uint8) (i int64) { - _, i, _ = d.decIntAny() - checkOverflow(0, i, bitsize) - d.bdRead = false - return -} - -func (d *simpleDecDriver) decodeUint(bitsize uint8) (ui uint64) { - ui, i, neg := d.decIntAny() - if neg { - decErr("Assigning negative signed value: %v, to unsigned type", i) - } - checkOverflow(ui, 0, bitsize) - d.bdRead = false - return -} - -func (d *simpleDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { - switch d.bd { - case simpleVdFloat32: - f = float64(math.Float32frombits(d.r.readUint32())) - case simpleVdFloat64: - f = math.Float64frombits(d.r.readUint64()) - default: - if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 { - _, i, _ := d.decIntAny() - f = float64(i) - } else { - decErr("Float only valid from float32/64: Invalid descriptor: %v", d.bd) - } - } - checkOverflowFloat32(f, chkOverflow32) - d.bdRead = false - return -} - -// bool can be decoded from bool only (single byte). 
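// For example (a sketch using this package's public API): under SimpleHandle,
// []byte{simpleVdTrue} is a complete encoded boolean:
//
//	var b bool
//	err := NewDecoderBytes([]byte{simpleVdTrue}, new(SimpleHandle)).Decode(&b)
//	// on success, b == true; any other descriptor byte falls into the
//	// decErr path in decodeBool below.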
-func (d *simpleDecDriver) decodeBool() (b bool) { - switch d.bd { - case simpleVdTrue: - b = true - case simpleVdFalse: - default: - decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) - } - d.bdRead = false - return -} - -func (d *simpleDecDriver) readMapLen() (length int) { - d.bdRead = false - return d.decLen() -} - -func (d *simpleDecDriver) readArrayLen() (length int) { - d.bdRead = false - return d.decLen() -} - -func (d *simpleDecDriver) decLen() int { - switch d.bd % 8 { - case 0: - return 0 - case 1: - return int(d.r.readn1()) - case 2: - return int(d.r.readUint16()) - case 3: - ui := uint64(d.r.readUint32()) - checkOverflow(ui, 0, intBitsize) - return int(ui) - case 4: - ui := d.r.readUint64() - checkOverflow(ui, 0, intBitsize) - return int(ui) - } - decErr("decLen: Cannot read length: bd%8 must be in range 0..4. Got: %d", d.bd%8) - return -1 -} - -func (d *simpleDecDriver) decodeString() (s string) { - s = string(d.r.readn(d.decLen())) - d.bdRead = false - return -} - -func (d *simpleDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { - if clen := d.decLen(); clen > 0 { - // if no contents in stream, don't update the passed byteslice - if len(bs) != clen { - if len(bs) > clen { - bs = bs[:clen] - } else { - bs = make([]byte, clen) - } - bsOut = bs - changed = true - } - d.r.readb(bs) - } - d.bdRead = false - return -} - -func (d *simpleDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - switch d.bd { - case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: - l := d.decLen() - xtag = d.r.readn1() - if verifyTag && xtag != tag { - decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) - } - xbs = d.r.readn(l) - case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - xbs, _ = d.decodeBytes(nil) - default: - decErr("Invalid d.vd for extensions (Expecting extensions or byte array). 
Got: 0x%x", d.bd) - } - d.bdRead = false - return -} - -func (d *simpleDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { - d.initReadNext() - - switch d.bd { - case simpleVdNil: - vt = valueTypeNil - case simpleVdFalse: - vt = valueTypeBool - v = false - case simpleVdTrue: - vt = valueTypeBool - v = true - case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: - vt = valueTypeUint - ui, _, _ := d.decIntAny() - v = ui - case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: - vt = valueTypeInt - _, i, _ := d.decIntAny() - v = i - case simpleVdFloat32: - vt = valueTypeFloat - v = d.decodeFloat(true) - case simpleVdFloat64: - vt = valueTypeFloat - v = d.decodeFloat(false) - case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: - vt = valueTypeString - v = d.decodeString() - case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - vt = valueTypeBytes - v, _ = d.decodeBytes(nil) - case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: - vt = valueTypeExt - l := d.decLen() - var re RawExt - re.Tag = d.r.readn1() - re.Data = d.r.readn(l) - v = &re - vt = valueTypeExt - case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: - vt = valueTypeArray - decodeFurther = true - case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: - vt = valueTypeMap - decodeFurther = true - default: - decErr("decodeNaked: Unrecognized d.vd: 0x%x", d.bd) - } - - if !decodeFurther { - d.bdRead = false - } - return -} - -//------------------------------------ - -// SimpleHandle is a Handle for a very simple encoding format. -// -// simple is a simplistic codec similar to binc, but not as compact. -// - Encoding of a value is always preceeded by the descriptor byte (bd) -// - True, false, nil are encoded fully in 1 byte (the descriptor) -// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte). -// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers. -// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte) -// - Lenght of containers (strings, bytes, array, map, extensions) -// are encoded in 0, 1, 2, 4 or 8 bytes. -// Zero-length containers have no length encoded. -// For others, the number of bytes is given by pow(2, bd%3) -// - maps are encoded as [bd] [length] [[key][value]]... -// - arrays are encoded as [bd] [length] [value]... -// - extensions are encoded as [bd] [length] [tag] [byte]... -// - strings/bytearrays are encoded as [bd] [length] [byte]... -// -// The full spec will be published soon. 
-type SimpleHandle struct { - BasicHandle -} - -func (h *SimpleHandle) newEncDriver(w encWriter) encDriver { - return &simpleEncDriver{w: w, h: h} -} - -func (h *SimpleHandle) newDecDriver(r decReader) decDriver { - return &simpleDecDriver{r: r, h: h} -} - -func (_ *SimpleHandle) writeExt() bool { - return true -} - -func (h *SimpleHandle) getBasicHandle() *BasicHandle { - return &h.BasicHandle -} - -var _ decDriver = (*simpleDecDriver)(nil) -var _ encDriver = (*simpleEncDriver)(nil) diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/time.go b/vendor/github.com/hashicorp/go-msgpack/codec/time.go deleted file mode 100644 index c86d65328d76a..0000000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/time.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -import ( - "time" -) - -var ( - timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'} -) - -// EncodeTime encodes a time.Time as a []byte, including -// information on the instant in time and UTC offset. -// -// Format Description -// -// A timestamp is composed of 3 components: -// -// - secs: signed integer representing seconds since unix epoch -// - nsces: unsigned integer representing fractional seconds as a -// nanosecond offset within secs, in the range 0 <= nsecs < 1e9 -// - tz: signed integer representing timezone offset in minutes east of UTC, -// and a dst (daylight savings time) flag -// -// When encoding a timestamp, the first byte is the descriptor, which -// defines which components are encoded and how many bytes are used to -// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it -// is not encoded in the byte array explicitly*. -// -// Descriptor 8 bits are of the form `A B C DDD EE`: -// A: Is secs component encoded? 1 = true -// B: Is nsecs component encoded? 1 = true -// C: Is tz component encoded? 1 = true -// DDD: Number of extra bytes for secs (range 0-7). -// If A = 1, secs encoded in DDD+1 bytes. -// If A = 0, secs is not encoded, and is assumed to be 0. -// If A = 1, then we need at least 1 byte to encode secs. -// DDD says the number of extra bytes beyond that 1. -// E.g. if DDD=0, then secs is represented in 1 byte. -// if DDD=2, then secs is represented in 3 bytes. -// EE: Number of extra bytes for nsecs (range 0-3). -// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above) -// -// Following the descriptor bytes, subsequent bytes are: -// -// secs component encoded in `DDD + 1` bytes (if A == 1) -// nsecs component encoded in `EE + 1` bytes (if B == 1) -// tz component encoded in 2 bytes (if C == 1) -// -// secs and nsecs components are integers encoded in a BigEndian -// 2-complement encoding format. -// -// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to -// Least significant bit 0 are described below: -// -// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes). -// Bit 15 = have\_dst: set to 1 if we set the dst flag. -// Bit 14 = dst\_on: set to 1 if dst is in effect at the time, or 0 if not. -// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format. 
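//
// As a worked example (a sketch derived from the rules above):
// time.Unix(60, 0).UTC() has secs=60, nsecs=0 and a UTC zone, so only the
// secs component is encoded, and it fits in one byte:
//
//	encodeTime(time.Unix(60, 0).UTC()) // => []byte{0x80, 0x3c}
//
// 0x80 reads as: secs present, DDD=0 (one byte), no nsecs, no tz; 0x3c == 60.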
-// -func encodeTime(t time.Time) []byte { - //t := rv.Interface().(time.Time) - tsecs, tnsecs := t.Unix(), t.Nanosecond() - var ( - bd byte - btmp [8]byte - bs [16]byte - i int = 1 - ) - l := t.Location() - if l == time.UTC { - l = nil - } - if tsecs != 0 { - bd = bd | 0x80 - bigen.PutUint64(btmp[:], uint64(tsecs)) - f := pruneSignExt(btmp[:], tsecs >= 0) - bd = bd | (byte(7-f) << 2) - copy(bs[i:], btmp[f:]) - i = i + (8 - f) - } - if tnsecs != 0 { - bd = bd | 0x40 - bigen.PutUint32(btmp[:4], uint32(tnsecs)) - f := pruneSignExt(btmp[:4], true) - bd = bd | byte(3-f) - copy(bs[i:], btmp[f:4]) - i = i + (4 - f) - } - if l != nil { - bd = bd | 0x20 - // Note that Go Libs do not give access to dst flag. - _, zoneOffset := t.Zone() - //zoneName, zoneOffset := t.Zone() - zoneOffset /= 60 - z := uint16(zoneOffset) - bigen.PutUint16(btmp[:2], z) - // clear dst flags - bs[i] = btmp[0] & 0x3f - bs[i+1] = btmp[1] - i = i + 2 - } - bs[0] = bd - return bs[0:i] -} - -// DecodeTime decodes a []byte into a time.Time. -func decodeTime(bs []byte) (tt time.Time, err error) { - bd := bs[0] - var ( - tsec int64 - tnsec uint32 - tz uint16 - i byte = 1 - i2 byte - n byte - ) - if bd&(1<<7) != 0 { - var btmp [8]byte - n = ((bd >> 2) & 0x7) + 1 - i2 = i + n - copy(btmp[8-n:], bs[i:i2]) - //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it) - if bs[i]&(1<<7) != 0 { - copy(btmp[0:8-n], bsAll0xff) - //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff } - } - i = i2 - tsec = int64(bigen.Uint64(btmp[:])) - } - if bd&(1<<6) != 0 { - var btmp [4]byte - n = (bd & 0x3) + 1 - i2 = i + n - copy(btmp[4-n:], bs[i:i2]) - i = i2 - tnsec = bigen.Uint32(btmp[:]) - } - if bd&(1<<5) == 0 { - tt = time.Unix(tsec, int64(tnsec)).UTC() - return - } - // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name. - // However, we need name here, so it can be shown when time is printed. - // Zone name is in form: UTC-08:00. - // Note that Go Libs do not give access to dst flag, so we ignore dst bits - - i2 = i + 2 - tz = bigen.Uint16(bs[i:i2]) - i = i2 - // sign extend sign bit into top 2 MSB (which were dst bits): - if tz&(1<<13) == 0 { // positive - tz = tz & 0x3fff //clear 2 MSBs: dst bits - } else { // negative - tz = tz | 0xc000 //set 2 MSBs: dst bits - //tzname[3] = '-' (TODO: verify. this works here) - } - tzint := int16(tz) - if tzint == 0 { - tt = time.Unix(tsec, int64(tnsec)).UTC() - } else { - // For Go Time, do not use a descriptive timezone. - // It's unnecessary, and makes it harder to do a reflect.DeepEqual. - // The Offset already tells what the offset should be, if not on UTC and unknown zone name. - // var zoneName = timeLocUTCName(tzint) - tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60)) - } - return -} - -func timeLocUTCName(tzint int16) string { - if tzint == 0 { - return "UTC" - } - var tzname = []byte("UTC+00:00") - //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below. - //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first - var tzhr, tzmin int16 - if tzint < 0 { - tzname[3] = '-' // (TODO: verify. 
this works here) - tzhr, tzmin = -tzint/60, (-tzint)%60 - } else { - tzhr, tzmin = tzint/60, tzint%60 - } - tzname[4] = timeDigits[tzhr/10] - tzname[5] = timeDigits[tzhr%10] - tzname[7] = timeDigits[tzmin/10] - tzname[8] = timeDigits[tzmin%10] - return string(tzname) - //return time.FixedZone(string(tzname), int(tzint)*60) -} diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/LICENSE b/vendor/github.com/hashicorp/go-msgpack/v2/LICENSE new file mode 100644 index 0000000000000..95a0f0541cdaa --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2012-2015 Ugorji Nwoke. +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/build.sh b/vendor/github.com/hashicorp/go-msgpack/v2/codec/build.sh new file mode 100644 index 0000000000000..831bd86442219 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/build.sh @@ -0,0 +1,263 @@ +#!/bin/bash + +# Run all the different permutations of all the tests and other things +# This helps ensure that nothing gets broken. + +_tests() { + local gover=$( go version | cut -f 3 -d ' ' ) + local a=( "" "codecgen" ) + for i in "${a[@]}" + do + echo ">>>> TAGS: $i" + local i2=${i:-default} + case $gover in + go1.[0-6]*) go vet -printfuncs "errorf" "$@" && + go test ${zargs[*]} -vet off -tags "$i" "$@" ;; + *) go vet -printfuncs "errorf" "$@" && + go test ${zargs[*]} -vet off -tags "alltests $i" -run "Suite" -coverprofile "${i2// /-}.cov.out" "$@" ;; + esac + if [[ "$?" != 0 ]]; then return 1; fi + done + echo "++++++++ TEST SUITES ALL PASSED ++++++++" +} + + +# is a generation needed? +_ng() { + local a="$1" + if [[ ! -e "$a" ]]; then echo 1; return; fi + for i in `ls -1 *.go.tmpl gen.go values_test.go` + do + if [[ "$a" -ot "$i" ]]; then echo 1; return; fi + done +} + +_prependbt() { + cat > ${2} <> ${2} + rm -f ${1} +} + +# _build generates gen-helper.go. +_build() { + if ! 
[[ "${zforce}" || $(_ng "gen-helper.generated.go") || $(_ng "gen.generated.go") ]]; then return 0; fi + + if [ "${zbak}" ]; then + _zts=`date '+%m%d%Y_%H%M%S'` + _gg=".generated.go" + [ -e "gen-helper${_gg}" ] && mv gen-helper${_gg} gen-helper${_gg}__${_zts}.bak + [ -e "gen${_gg}" ] && mv gen${_gg} gen${_gg}__${_zts}.bak + fi + rm -f gen-helper.generated.go gen.generated.go \ + *_generated_test.go *.generated_ffjson_expose.go + + cat > gen.generated.go <> gen.generated.go < gen-dec-map.go.tmpl + cat >> gen.generated.go <> gen.generated.go < gen-dec-array.go.tmpl + cat >> gen.generated.go <> gen.generated.go < gen-enc-chan.go.tmpl + cat >> gen.generated.go < gen-from-tmpl.codec.generated.go < gen-from-tmpl.generated.go < " + fnameOut + " ______") +fin, err := os.Open(fnameIn) +if err != nil { panic(err) } +defer fin.Close() +fout, err := os.Create(fnameOut) +if err != nil { panic(err) } +defer fout.Close() +err = codec.GenInternalGoFile(fin, fout) +if err != nil { panic(err) } +} + +func main() { +run("gen-helper.go.tmpl", "gen-helper.generated.go") +run("mammoth-test.go.tmpl", "mammoth_generated_test.go") +run("mammoth2-test.go.tmpl", "mammoth2_generated_test.go") +} +EOF + + sed -e 's+// __DO_NOT_REMOVE__NEEDED_FOR_REPLACING__IMPORT_PATH__FOR_CODEC_BENCH__+import . "github.com/hashicorp/go-msgpack/v2/codec"+' \ + shared_test.go > bench/shared_test.go + + # explicitly return 0 if this passes, else return 1 + go run -tags "codecgen.exec" gen-from-tmpl.generated.go && + rm -f gen-from-tmpl.*generated.go && + return 0 + return 1 +} + +_codegenerators() { + local c5="_generated_test.go" + local c7="$PWD/codecgen" + local c8="$c7/__codecgen" + local c9="codecgen-scratch.go" + + if ! [[ $zforce || $(_ng "values_codecgen${c5}") ]]; then return 0; fi + + # Note: ensure you run the codecgen for this codebase/directory i.e. ./codecgen/codecgen + true && + echo "codecgen ... " && + if [[ $zforce || ! -f "$c8" || "$c7/gen.go" -nt "$c8" ]]; then + echo "rebuilding codecgen ... " && ( cd codecgen && go build -o $c8 ${zargs[*]} . ) + fi && + $c8 -rt codecgen -t 'codecgen generated' -o values_codecgen${c5} -d 19780 $zfin $zfin2 && + cp mammoth2_generated_test.go $c9 && + $c8 -o mammoth2_codecgen${c5} -d 19781 mammoth2_generated_test.go && + rm -f $c9 && + echo "generators done!" +} + +_prebuild() { + echo "prebuild: zforce: $zforce" + local d="$PWD" + zfin="test_values.generated.go" + zfin2="test_values_flex.generated.go" + zpkg="github.com/hashicorp/go-msgpack/v2/codec" + # zpkg=${d##*/src/} + # zgobase=${d%%/src/*} + # rm -f *_generated_test.go + rm -f codecgen-*.go && + _build && + cp $d/values_test.go $d/$zfin && + cp $d/values_flex_test.go $d/$zfin2 && + _codegenerators && + if [[ "$(type -t _codegenerators_external )" = "function" ]]; then _codegenerators_external ; fi && + if [[ $zforce ]]; then go install ${zargs[*]} .; fi && + echo "prebuild done successfully" + rm -f $d/$zfin $d/$zfin2 + unset zfin zfin2 zpkg +} + +_make() { + zforce=1 + (cd codecgen && go install ${zargs[*]} .) && _prebuild && go install ${zargs[*]} . + unset zforce +} + +_clean() { + rm -f gen-from-tmpl.*generated.go \ + codecgen-*.go \ + test_values.generated.go test_values_flex.generated.go +} + +_release() { + local reply + read -p "Pre-release validation takes a few minutes and MUST be run from within GOPATH/src. Confirm y/n? " -n 1 -r reply + echo + if [[ ! $reply =~ ^[Yy]$ ]]; then return 1; fi + + # expects GOROOT, GOROOT_BOOTSTRAP to have been set. 
+ if [[ -z "${GOROOT// }" || -z "${GOROOT_BOOTSTRAP// }" ]]; then return 1; fi + # (cd $GOROOT && git checkout -f master && git pull && git reset --hard) + (cd $GOROOT && git pull) + local f=`pwd`/make.release.out + cat > $f <>$f + if [[ "$i" != "master" ]]; then i="release-branch.go$i"; fi + (false || + (echo "===== BUILDING GO SDK for branch: $i ... =====" && + cd $GOROOT && + git checkout -f $i && git reset --hard && git clean -f . && + cd src && ./make.bash >>$f 2>&1 && sleep 1 ) ) && + echo "===== GO SDK BUILD DONE =====" && + _prebuild && + echo "===== PREBUILD DONE with exit: $? =====" && + _tests "$@" + if [[ "$?" != 0 ]]; then return 1; fi + done + unset zforce + echo "++++++++ RELEASE TEST SUITES ALL PASSED ++++++++" +} + +_usage() { + cat < [tests, make, prebuild (force) (external), inlining diagnostics, mid-stack inlining, race detector] + -v -> verbose +EOF + if [[ "$(type -t _usage_run)" = "function" ]]; then _usage_run ; fi +} + +_main() { + if [[ -z "$1" ]]; then _usage; return 1; fi + local x + unset zforce + zargs=() + zbenchflags="" + OPTIND=1 + while getopts ":ctmnrgpfvlzdb:" flag + do + case "x$flag" in + 'xf') zforce=1 ;; + 'xv') zverbose=1 ;; + 'xl') zargs+=("-gcflags"); zargs+=("-l=4") ;; + 'xn') zargs+=("-gcflags"); zargs+=("-m=2") ;; + 'xd') zargs+=("-race") ;; + 'xb') x='b'; zbenchflags=${OPTARG} ;; + x\?) _usage; return 1 ;; + *) x=$flag ;; + esac + done + shift $((OPTIND-1)) + # echo ">>>> _main: extra args: $@" + case "x$x" in + 'xt') _tests "$@" ;; + 'xm') _make "$@" ;; + 'xr') _release "$@" ;; + 'xg') _go ;; + 'xp') _prebuild "$@" ;; + 'xc') _clean "$@" ;; + 'xz') _analyze "$@" ;; + 'xb') _bench "$@" ;; + esac + unset zforce zargs zbenchflags +} + +[ "." = `dirname $0` ] && _main "$@" \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/codecgen.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/codecgen.go new file mode 100644 index 0000000000000..28fa810593d4d --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/codecgen.go @@ -0,0 +1,14 @@ +//go:build codecgen || generated +// +build codecgen generated + +package codec + +// this file is here, to set the codecgen variable to true +// when the build tag codecgen is set. +// +// this allows us do specific things e.g. skip missing fields tests, +// when running in codecgen mode. + +func init() { + codecgen = true +} diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/decode.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/decode.go new file mode 100644 index 0000000000000..e0fb62df4929c --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/decode.go @@ -0,0 +1,3111 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "encoding" + "errors" + "fmt" + "io" + "reflect" + "runtime" + "strconv" + "time" +) + +// Some tagging information for error messages. 
+const ( + msgBadDesc = "unrecognized descriptor byte" + // msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v" +) + +const ( + decDefMaxDepth = 1024 // maximum depth + decDefSliceCap = 8 + decDefChanCap = 64 // should be large, as cap cannot be expanded + decScratchByteArrayLen = cacheLineSize // + (8 * 2) // - (8 * 1) +) + +var ( + errstrOnlyMapOrArrayCanDecodeIntoStruct = "only encoded map or array can be decoded into a struct" + errstrCannotDecodeIntoNil = "cannot decode into nil" + + errmsgExpandSliceOverflow = "expand slice: slice overflow" + errmsgExpandSliceCannotChange = "expand slice: cannot change" + + errDecoderNotInitialized = errors.New("Decoder not initialized") + + errDecUnreadByteNothingToRead = errors.New("cannot unread - nothing has been read") + errDecUnreadByteLastByteNotRead = errors.New("cannot unread - last byte has not been read") + errDecUnreadByteUnknown = errors.New("cannot unread - reason unknown") + errMaxDepthExceeded = errors.New("maximum decoding depth exceeded") +) + +/* + +// decReader abstracts the reading source, allowing implementations that can +// read from an io.Reader or directly off a byte slice with zero-copying. +// +// Deprecated: Use decReaderSwitch instead. +type decReader interface { + unreadn1() + // readx will use the implementation scratch buffer if possible i.e. n < len(scratchbuf), OR + // just return a view of the []byte being decoded from. + // Ensure you call detachZeroCopyBytes later if this needs to be sent outside codec control. + readx(n int) []byte + readb([]byte) + readn1() uint8 + numread() uint // number of bytes read + track() + stopTrack() []byte + + // skip will skip any byte that matches, and return the first non-matching byte + skip(accept *bitset256) (token byte) + // readTo will read any byte that matches, stopping once no-longer matching. + readTo(in []byte, accept *bitset256) (out []byte) + // readUntil will read, only stopping once it matches the 'stop' byte. + readUntil(in []byte, stop byte) (out []byte) +} + +*/ + +type decDriver interface { + // this will check if the next token is a break. + CheckBreak() bool + // TryDecodeAsNil tries to decode as nil. + // Note: TryDecodeAsNil should be careful not to share any temporary []byte with + // the rest of the decDriver. This is because sometimes, we optimize by holding onto + // a transient []byte, and ensuring the only other call we make to the decDriver + // during that time is maybe a TryDecodeAsNil() call. + TryDecodeAsNil() bool + // ContainerType returns one of: Bytes, String, Nil, Slice or Map. Return unSet if not known. + ContainerType() (vt valueType) + // IsBuiltinType(rt uintptr) bool + + // DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt. + // For maps and arrays, it will not do the decoding in-band, but will signal + // the decoder, so that is done later, by setting the decNaked.valueType field. + // + // Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types). + // for extensions, DecodeNaked must read the tag and the []byte if it exists. + // if the []byte is not read, then kInterfaceNaked will treat it as a Handle + // that stores the subsequent value in-band, and complete reading the RawExt. + // + // extensions should also use readx to decode them, for efficiency. + // kInterface will extract the detached byte slice if it has to pass it outside its realm. 
+ DecodeNaked() + + // Deprecated: use DecodeInt64 and DecodeUint64 instead + // DecodeInt(bitsize uint8) (i int64) + // DecodeUint(bitsize uint8) (ui uint64) + + DecodeInt64() (i int64) + DecodeUint64() (ui uint64) + + DecodeFloat64() (f float64) + DecodeBool() (b bool) + // DecodeString can also decode symbols. + // It looks redundant as DecodeBytes is available. + // However, some codecs (e.g. binc) support symbols and can + // return a pre-stored string value, meaning that it can bypass + // the cost of []byte->string conversion. + DecodeString() (s string) + DecodeStringAsBytes() (v []byte) + + // DecodeBytes may be called directly, without going through reflection. + // Consequently, it must be designed to handle possible nil. + DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) + // DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) + + // decodeExt will decode into a *RawExt or into an extension. + DecodeExt(v interface{}, xtag uint64, ext Ext) (realxtag uint64) + // decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) + + DecodeTime() (t time.Time) + + ReadArrayStart() int + ReadArrayElem() + ReadArrayEnd() + ReadMapStart() int + ReadMapElemKey() + ReadMapElemValue() + ReadMapEnd() + + reset() + uncacheRead() +} + +type decodeError struct { + codecError + pos int +} + +func (d decodeError) Error() string { + return fmt.Sprintf("%s decode error [pos %d]: %v", d.name, d.pos, d.err) +} + +type decDriverNoopContainerReader struct{} + +func (x decDriverNoopContainerReader) ReadArrayStart() (v int) { return } +func (x decDriverNoopContainerReader) ReadArrayElem() {} +func (x decDriverNoopContainerReader) ReadArrayEnd() {} +func (x decDriverNoopContainerReader) ReadMapStart() (v int) { return } +func (x decDriverNoopContainerReader) ReadMapElemKey() {} +func (x decDriverNoopContainerReader) ReadMapElemValue() {} +func (x decDriverNoopContainerReader) ReadMapEnd() {} +func (x decDriverNoopContainerReader) CheckBreak() (v bool) { return } + +// func (x decNoSeparator) uncacheRead() {} + +// DecodeOptions captures configuration options during decode. +type DecodeOptions struct { + // MapType specifies type to use during schema-less decoding of a map in the stream. + // If nil (unset), we default to map[string]interface{} iff json handle and MapStringAsKey=true, + // else map[interface{}]interface{}. + MapType reflect.Type + + // SliceType specifies type to use during schema-less decoding of an array in the stream. + // If nil (unset), we default to []interface{} for all formats. + SliceType reflect.Type + + // MaxInitLen defines the maximum initial length that we "make" a collection + // (string, slice, map, chan). If 0 or negative, we default to a sensible value + // based on the size of an element in the collection. + // + // For example, when decoding, a stream may say that it has 2^64 elements. + // We should not automatically provision a slice of that size, to prevent Out-Of-Memory crash. + // Instead, we provision up to MaxInitLen, fill that up, and start appending after that. + MaxInitLen int + + // ReaderBufferSize is the size of the buffer used when reading. + // + // if > 0, we use a smart buffer internally for performance purposes. + ReaderBufferSize int + + // MaxDepth defines the maximum depth when decoding nested + // maps and slices. If 0 or negative, we default to a suitably large number (currently 1024).
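	//
	// For example (a sketch; the handle type is whichever concrete Handle
	// embeds these options):
	//
	//	var mh MsgpackHandle
	//	mh.MaxDepth = 16 // deeper nesting now fails with errMaxDepthExceeded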
+ MaxDepth int16 + + // If ErrorIfNoField, return an error when decoding a map + // from a codec stream into a struct, and no matching struct field is found. + ErrorIfNoField bool + + // If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded. + // For example, the stream contains an array of 8 items, but you are decoding into a [4]T array, + // or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set). + ErrorIfNoArrayExpand bool + + // If SignedInteger, use the int64 during schema-less decoding of unsigned values (not uint64). + SignedInteger bool + + // MapValueReset controls how we decode into a map value. + // + // By default, we MAY retrieve the mapping for a key, and then decode into that. + // However, especially with big maps, that retrieval may be expensive and unnecessary + // if the stream already contains all that is necessary to recreate the value. + // + // If true, we will never retrieve the previous mapping, + // but rather decode into a new value and set that in the map. + // + // If false, we will retrieve the previous mapping if necessary e.g. + // the previous mapping is a pointer, or is a struct or array with pre-set state, + // or is an interface. + MapValueReset bool + + // SliceElementReset: on decoding a slice, reset the element to a zero value first. + // + // concern: if the slice already contained some garbage, we will decode into that garbage. + SliceElementReset bool + + // InterfaceReset controls how we decode into an interface. + // + // By default, when we see a field that is an interface{...}, + // or a map with interface{...} value, we will attempt decoding into the + // "contained" value. + // + // However, this prevents us from reading a string into an interface{} + // that formerly contained a number. + // + // If true, we will decode into a new "blank" value, and set that in the interface. + // If false, we will decode into whatever is contained in the interface. + InterfaceReset bool + + // InternString controls interning of strings during decoding. + // + // Some handles, e.g. json, typically will read map keys as strings. + // If the set of keys are finite, it may help reduce allocation to + // look them up from a map (than to allocate them afresh). + // + // Note: Handles will be smart when using the intern functionality. + // Every string should not be interned. + // An excellent use-case for interning is struct field names, + // or map keys where key type is string. + InternString bool + + // PreferArrayOverSlice controls whether to decode to an array or a slice. + // + // This only impacts decoding into a nil interface{}. + // Consequently, it has no effect on codecgen. + // + // *Note*: This only applies if using go1.5 and above, + // as it requires reflect.ArrayOf support which was absent before go1.5. + PreferArrayOverSlice bool + + // DeleteOnNilMapValue controls how to decode a nil value in the stream. + // + // If true, we will delete the mapping of the key. + // Else, just set the mapping to the zero value of the type. + DeleteOnNilMapValue bool + + // RawToString controls how raw bytes in a stream are decoded into a nil interface{}. + // By default, they are decoded as []byte, but can be decoded as string (if configured). + RawToString bool +} + +// ------------------------------------------------ + +type unreadByteStatus uint8 + +// unreadByteStatus goes from +// undefined (when initialized) -- (read) --> canUnread -- (unread) --> canRead ... 
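// This mirrors the io.ByteScanner contract: UnreadByte is legal only
// immediately after a successful ReadByte. A sketch of the legal sequence:
//
//	b, _ := z.ReadByte() // status becomes unreadByteCanUnread
//	_ = z.UnreadByte()   // status becomes unreadByteCanRead; next ReadByte returns b again
//	_ = z.UnreadByte()   // errors: last byte has not been read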
+const ( + unreadByteUndefined unreadByteStatus = iota + unreadByteCanRead + unreadByteCanUnread +) + +type ioDecReaderCommon struct { + r io.Reader // the reader passed in + + n uint // num read + + l byte // last byte + ls unreadByteStatus // last byte status + trb bool // tracking bytes turned on + _ bool + b [4]byte // tiny buffer for reading single bytes + + tr []byte // tracking bytes read +} + +func (z *ioDecReaderCommon) reset(r io.Reader) { + z.r = r + z.ls = unreadByteUndefined + z.l, z.n = 0, 0 + z.trb = false + if z.tr != nil { + z.tr = z.tr[:0] + } +} + +func (z *ioDecReaderCommon) numread() uint { + return z.n +} + +func (z *ioDecReaderCommon) track() { + if z.tr != nil { + z.tr = z.tr[:0] + } + z.trb = true +} + +func (z *ioDecReaderCommon) stopTrack() (bs []byte) { + z.trb = false + return z.tr +} + +// ------------------------------------------ + +// ioDecReader is a decReader that reads off an io.Reader. +// +// It also has a fallback implementation of ByteScanner if needed. +type ioDecReader struct { + ioDecReaderCommon + + rr io.Reader + br io.ByteScanner + + x [scratchByteArrayLen]byte // for: get struct field name, swallow valueTypeBytes, etc + _ [1]uint64 // padding +} + +func (z *ioDecReader) reset(r io.Reader) { + z.ioDecReaderCommon.reset(r) + + var ok bool + z.rr = r + z.br, ok = r.(io.ByteScanner) + if !ok { + z.br = z + z.rr = z + } +} + +func (z *ioDecReader) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return + } + var firstByte bool + if z.ls == unreadByteCanRead { + z.ls = unreadByteCanUnread + p[0] = z.l + if len(p) == 1 { + n = 1 + return + } + firstByte = true + p = p[1:] + } + n, err = z.r.Read(p) + if n > 0 { + if err == io.EOF && n == len(p) { + err = nil // read was successful, so postpone EOF (till next time) + } + z.l = p[n-1] + z.ls = unreadByteCanUnread + } + if firstByte { + n++ + } + return +} + +func (z *ioDecReader) ReadByte() (c byte, err error) { + n, err := z.Read(z.b[:1]) + if n == 1 { + c = z.b[0] + if err == io.EOF { + err = nil // read was successful, so postpone EOF (till next time) + } + } + return +} + +func (z *ioDecReader) UnreadByte() (err error) { + switch z.ls { + case unreadByteCanUnread: + z.ls = unreadByteCanRead + case unreadByteCanRead: + err = errDecUnreadByteLastByteNotRead + case unreadByteUndefined: + err = errDecUnreadByteNothingToRead + default: + err = errDecUnreadByteUnknown + } + return +} + +func (z *ioDecReader) readx(n uint) (bs []byte) { + if n == 0 { + return + } + if n < uint(len(z.x)) { + bs = z.x[:n] + } else { + bs = make([]byte, n) + } + if _, err := decReadFull(z.rr, bs); err != nil { + panic(err) + } + z.n += uint(len(bs)) + if z.trb { + z.tr = append(z.tr, bs...) + } + return +} + +func (z *ioDecReader) readb(bs []byte) { + if len(bs) == 0 { + return + } + if _, err := decReadFull(z.rr, bs); err != nil { + panic(err) + } + z.n += uint(len(bs)) + if z.trb { + z.tr = append(z.tr, bs...) 
+ } +} + +func (z *ioDecReader) readn1eof() (b uint8, eof bool) { + b, err := z.br.ReadByte() + if err == nil { + z.n++ + if z.trb { + z.tr = append(z.tr, b) + } + } else if err == io.EOF { + eof = true + } else { + panic(err) + } + return +} + +func (z *ioDecReader) readn1() (b uint8) { + b, err := z.br.ReadByte() + if err == nil { + z.n++ + if z.trb { + z.tr = append(z.tr, b) + } + return + } + panic(err) +} + +func (z *ioDecReader) skip(accept *bitset256) (token byte) { + var eof bool + // for { + // token, eof = z.readn1eof() + // if eof { + // return + // } + // if accept.isset(token) { + // continue + // } + // return + // } +LOOP: + token, eof = z.readn1eof() + if eof { + return + } + if accept.isset(token) { + goto LOOP + } + return +} + +func (z *ioDecReader) readTo(in []byte, accept *bitset256) []byte { + // out = in + + // for { + // token, eof := z.readn1eof() + // if eof { + // return + // } + // if accept.isset(token) { + // out = append(out, token) + // } else { + // z.unreadn1() + // return + // } + // } +LOOP: + token, eof := z.readn1eof() + if eof { + return in + } + if accept.isset(token) { + // out = append(out, token) + in = append(in, token) + goto LOOP + } + z.unreadn1() + return in +} + +func (z *ioDecReader) readUntil(in []byte, stop byte) (out []byte) { + out = in + // for { + // token, eof := z.readn1eof() + // if eof { + // panic(io.EOF) + // } + // out = append(out, token) + // if token == stop { + // return + // } + // } +LOOP: + token, eof := z.readn1eof() + if eof { + panic(io.EOF) + } + out = append(out, token) + if token == stop { + return + } + goto LOOP +} + +//go:noinline +func (z *ioDecReader) unreadn1() { + err := z.br.UnreadByte() + if err != nil { + panic(err) + } + z.n-- + if z.trb { + if l := len(z.tr) - 1; l >= 0 { + z.tr = z.tr[:l] + } + } +} + +// ------------------------------------ + +type bufioDecReader struct { + ioDecReaderCommon + + c uint // cursor + buf []byte + + bytesBufPooler + + // err error + + // Extensions can call Decode() within a current Decode() call. + // We need to know when the top level Decode() call returns, + // so we can decide whether to Release() or not. + calls uint16 // what depth in mustDecode are we in now. + + _ [6]uint8 // padding + + _ [1]uint64 // padding +} + +func (z *bufioDecReader) reset(r io.Reader, bufsize int) { + z.ioDecReaderCommon.reset(r) + z.c = 0 + z.calls = 0 + if cap(z.buf) >= bufsize { + z.buf = z.buf[:0] + } else { + z.buf = z.bytesBufPooler.get(bufsize)[:0] + // z.buf = make([]byte, 0, bufsize) + } +} + +func (z *bufioDecReader) release() { + z.buf = nil + z.bytesBufPooler.end() +} + +func (z *bufioDecReader) readb(p []byte) { + var n = uint(copy(p, z.buf[z.c:])) + z.n += n + z.c += n + if len(p) == int(n) { + if z.trb { + z.tr = append(z.tr, p...) // cost=9 + } + } else { + z.readbFill(p, n) + } +} + +//go:noinline - fallback when z.buf is consumed +func (z *bufioDecReader) readbFill(p0 []byte, n uint) { + // at this point, there's nothing in z.buf to read (z.buf is fully consumed) + p := p0[n:] + var n2 uint + var err error + if len(p) > cap(z.buf) { + n2, err = decReadFull(z.r, p) + if err != nil { + panic(err) + } + n += n2 + z.n += n2 + // always keep last byte in z.buf + z.buf = z.buf[:1] + z.buf[0] = p[len(p)-1] + z.c = 1 + if z.trb { + z.tr = append(z.tr, p0[:n]...) 
+ } + return + } + // z.c is now 0, and len(p) <= cap(z.buf) +LOOP: + // for len(p) > 0 && z.err == nil { + if len(p) > 0 { + z.buf = z.buf[0:cap(z.buf)] + var n1 int + n1, err = z.r.Read(z.buf) + n2 = uint(n1) + if n2 == 0 && err != nil { + panic(err) + } + z.buf = z.buf[:n2] + n2 = uint(copy(p, z.buf)) + z.c = n2 + n += n2 + z.n += n2 + p = p[n2:] + goto LOOP + } + if z.c == 0 { + z.buf = z.buf[:1] + z.buf[0] = p[len(p)-1] + z.c = 1 + } + if z.trb { + z.tr = append(z.tr, p0[:n]...) + } +} + +func (z *bufioDecReader) readn1() (b byte) { + // fast-path, so we elide calling into Read() most of the time + if z.c < uint(len(z.buf)) { + b = z.buf[z.c] + z.c++ + z.n++ + if z.trb { + z.tr = append(z.tr, b) + } + } else { // meaning z.c == len(z.buf) or greater ... so need to fill + z.readbFill(z.b[:1], 0) + b = z.b[0] + } + return +} + +func (z *bufioDecReader) unreadn1() { + if z.c == 0 { + panic(errDecUnreadByteNothingToRead) + } + z.c-- + z.n-- + if z.trb { + z.tr = z.tr[:len(z.tr)-1] + } +} + +func (z *bufioDecReader) readx(n uint) (bs []byte) { + if n == 0 { + // return + } else if z.c+n <= uint(len(z.buf)) { + bs = z.buf[z.c : z.c+n] + z.n += n + z.c += n + if z.trb { + z.tr = append(z.tr, bs...) + } + } else { + bs = make([]byte, n) + // n no longer used - can reuse + n = uint(copy(bs, z.buf[z.c:])) + z.n += n + z.c += n + z.readbFill(bs, n) + } + return +} + +//go:noinline - track called by Decoder.nextValueBytes() (called by jsonUnmarshal,rawBytes) +func (z *bufioDecReader) doTrack(y uint) { + z.tr = append(z.tr, z.buf[z.c:y]...) // cost=14??? +} + +func (z *bufioDecReader) skipLoopFn(i uint) { + z.n += (i - z.c) - 1 + i++ + if z.trb { + // z.tr = append(z.tr, z.buf[z.c:i]...) + z.doTrack(i) + } + z.c = i +} + +func (z *bufioDecReader) skip(accept *bitset256) (token byte) { + // token, _ = z.search(nil, accept, 0, 1); return + + // for i := z.c; i < len(z.buf); i++ { + // if token = z.buf[i]; !accept.isset(token) { + // z.skipLoopFn(i) + // return + // } + // } + + i := z.c +LOOP: + if i < uint(len(z.buf)) { + // inline z.skipLoopFn(i) and refactor, so cost is within inline budget + token = z.buf[i] + i++ + if accept.isset(token) { + goto LOOP + } + z.n += i - 2 - z.c + if z.trb { + z.doTrack(i) + } + z.c = i + return + } + return z.skipFill(accept) +} + +func (z *bufioDecReader) skipFill(accept *bitset256) (token byte) { + z.n += uint(len(z.buf)) - z.c + if z.trb { + z.tr = append(z.tr, z.buf[z.c:]...) + } + var n2 int + var err error + for { + z.c = 0 + z.buf = z.buf[0:cap(z.buf)] + n2, err = z.r.Read(z.buf) + if n2 == 0 && err != nil { + panic(err) + } + z.buf = z.buf[:n2] + var i int + for i, token = range z.buf { + if !accept.isset(token) { + z.skipLoopFn(uint(i)) + return + } + } + // for i := 0; i < n2; i++ { + // if token = z.buf[i]; !accept.isset(token) { + // z.skipLoopFn(i) + // return + // } + // } + z.n += uint(n2) + if z.trb { + z.tr = append(z.tr, z.buf...) + } + } +} + +func (z *bufioDecReader) readToLoopFn(i uint, out0 []byte) (out []byte) { + // out0 is never nil + z.n += (i - z.c) - 1 + out = append(out0, z.buf[z.c:i]...) 
+ if z.trb { + z.doTrack(i) + } + z.c = i + return +} + +func (z *bufioDecReader) readTo(in []byte, accept *bitset256) (out []byte) { + // _, out = z.search(in, accept, 0, 2); return + + // for i := z.c; i < len(z.buf); i++ { + // if !accept.isset(z.buf[i]) { + // return z.readToLoopFn(i, nil) + // } + // } + + i := z.c +LOOP: + if i < uint(len(z.buf)) { + if !accept.isset(z.buf[i]) { + // return z.readToLoopFn(i, nil) + // inline readToLoopFn here (for performance) + z.n += (i - z.c) - 1 + out = z.buf[z.c:i] + if z.trb { + z.doTrack(i) + } + z.c = i + return + } + i++ + goto LOOP + } + return z.readToFill(in, accept) +} + +func (z *bufioDecReader) readToFill(in []byte, accept *bitset256) (out []byte) { + z.n += uint(len(z.buf)) - z.c + out = append(in, z.buf[z.c:]...) + if z.trb { + z.tr = append(z.tr, z.buf[z.c:]...) + } + var n2 int + var err error + for { + z.c = 0 + z.buf = z.buf[0:cap(z.buf)] + n2, err = z.r.Read(z.buf) + if n2 == 0 && err != nil { + if err == io.EOF { + return // readTo should read until it matches or end is reached + } + panic(err) + } + z.buf = z.buf[:n2] + for i, token := range z.buf { + if !accept.isset(token) { + return z.readToLoopFn(uint(i), out) + } + } + // for i := 0; i < n2; i++ { + // if !accept.isset(z.buf[i]) { + // return z.readToLoopFn(i, out) + // } + // } + out = append(out, z.buf...) + z.n += uint(n2) + if z.trb { + z.tr = append(z.tr, z.buf...) + } + } +} + +func (z *bufioDecReader) readUntilLoopFn(i uint, out0 []byte) (out []byte) { + z.n += (i - z.c) - 1 + i++ + out = append(out0, z.buf[z.c:i]...) + if z.trb { + // z.tr = append(z.tr, z.buf[z.c:i]...) + z.doTrack(i) + } + z.c = i + return +} + +func (z *bufioDecReader) readUntil(in []byte, stop byte) (out []byte) { + // _, out = z.search(in, nil, stop, 4); return + + // for i := z.c; i < len(z.buf); i++ { + // if z.buf[i] == stop { + // return z.readUntilLoopFn(i, nil) + // } + // } + + i := z.c +LOOP: + if i < uint(len(z.buf)) { + if z.buf[i] == stop { + // inline readUntilLoopFn + // return z.readUntilLoopFn(i, nil) + z.n += (i - z.c) - 1 + i++ + out = z.buf[z.c:i] + if z.trb { + z.doTrack(i) + } + z.c = i + return + } + i++ + goto LOOP + } + return z.readUntilFill(in, stop) +} + +func (z *bufioDecReader) readUntilFill(in []byte, stop byte) (out []byte) { + z.n += uint(len(z.buf)) - z.c + out = append(in, z.buf[z.c:]...) + if z.trb { + z.tr = append(z.tr, z.buf[z.c:]...) + } + var n1 int + var n2 uint + var err error + for { + z.c = 0 + z.buf = z.buf[0:cap(z.buf)] + n1, err = z.r.Read(z.buf) + n2 = uint(n1) + if n2 == 0 && err != nil { + panic(err) + } + z.buf = z.buf[:n2] + for i, token := range z.buf { + if token == stop { + return z.readUntilLoopFn(uint(i), out) + } + } + // for i := 0; i < n2; i++ { + // if z.buf[i] == stop { + // return z.readUntilLoopFn(i, out) + // } + // } + out = append(out, z.buf...) + z.n += n2 + if z.trb { + z.tr = append(z.tr, z.buf...) 
+ } + } +} + +// ------------------------------------ + +var errBytesDecReaderCannotUnread = errors.New("cannot unread last byte read") + +// bytesDecReader is a decReader that reads off a byte slice with zero copying +type bytesDecReader struct { + b []byte // data + c uint // cursor + t uint // track start + // a int // available +} + +func (z *bytesDecReader) reset(in []byte) { + z.b = in + // z.a = len(in) + z.c = 0 + z.t = 0 +} + +func (z *bytesDecReader) numread() uint { + return z.c +} + +func (z *bytesDecReader) unreadn1() { + if z.c == 0 || len(z.b) == 0 { + panic(errBytesDecReaderCannotUnread) + } + z.c-- + // z.a++ +} + +func (z *bytesDecReader) readx(n uint) (bs []byte) { + // slicing from a non-constant start position is more expensive, + // as more computation is required to decipher the pointer start position. + // However, we do it only once, and it's better than reslicing both z.b and return value. + + // if n <= 0 { + // } else if z.a == 0 { + // panic(io.EOF) + // } else if n > z.a { + // panic(io.ErrUnexpectedEOF) + // } else { + // c0 := z.c + // z.c = c0 + n + // z.a = z.a - n + // bs = z.b[c0:z.c] + // } + // return + + if n != 0 { + z.c += n + if z.c > uint(len(z.b)) { + z.c = uint(len(z.b)) + panic(io.EOF) + } + bs = z.b[z.c-n : z.c] + } + return + + // if n == 0 { + // } else if z.c+n > uint(len(z.b)) { + // z.c = uint(len(z.b)) + // panic(io.EOF) + // } else { + // z.c += n + // bs = z.b[z.c-n : z.c] + // } + // return + + // if n == 0 { + // return + // } + // if z.c == uint(len(z.b)) { + // panic(io.EOF) + // } + // if z.c+n > uint(len(z.b)) { + // panic(io.ErrUnexpectedEOF) + // } + // // z.a -= n + // z.c += n + // return z.b[z.c-n : z.c] +} + +func (z *bytesDecReader) readb(bs []byte) { + copy(bs, z.readx(uint(len(bs)))) +} + +func (z *bytesDecReader) readn1() (v uint8) { + if z.c == uint(len(z.b)) { + panic(io.EOF) + } + v = z.b[z.c] + z.c++ + // z.a-- + return +} + +// func (z *bytesDecReader) readn1eof() (v uint8, eof bool) { +// if z.a == 0 { +// eof = true +// return +// } +// v = z.b[z.c] +// z.c++ +// z.a-- +// return +// } + +func (z *bytesDecReader) skip(accept *bitset256) (token byte) { + i := z.c + // if i == len(z.b) { + // goto END + // // panic(io.EOF) + // } + + // Replace loop with goto construct, so that this can be inlined + // for i := z.c; i < blen; i++ { + // if !accept.isset(z.b[i]) { + // token = z.b[i] + // i++ + // z.a -= (i - z.c) + // z.c = i + // return + // } + // } + + // i := z.c +LOOP: + if i < uint(len(z.b)) { + token = z.b[i] + i++ + if accept.isset(token) { + goto LOOP + } + // z.a -= (i - z.c) + z.c = i + return + } + // END: + panic(io.EOF) + // // z.a = 0 + // z.c = blen + // return +} + +func (z *bytesDecReader) readTo(_ []byte, accept *bitset256) (out []byte) { + return z.readToNoInput(accept) +} + +func (z *bytesDecReader) readToNoInput(accept *bitset256) (out []byte) { + i := z.c + if i == uint(len(z.b)) { + panic(io.EOF) + } + + // Replace loop with goto construct, so that this can be inlined + // for i := z.c; i < blen; i++ { + // if !accept.isset(z.b[i]) { + // out = z.b[z.c:i] + // z.a -= (i - z.c) + // z.c = i + // return + // } + // } + // out = z.b[z.c:] + // z.a, z.c = 0, blen + // return + + // i := z.c + // LOOP: + // if i < blen { + // if accept.isset(z.b[i]) { + // i++ + // goto LOOP + // } + // out = z.b[z.c:i] + // z.a -= (i - z.c) + // z.c = i + // return + // } + // out = z.b[z.c:] + // // z.a, z.c = 0, blen + // z.a = 0 + // z.c = blen + // return + + // c := i +LOOP: + if i < uint(len(z.b)) { + if 
accept.isset(z.b[i]) { + i++ + goto LOOP + } + } + + out = z.b[z.c:i] + // z.a -= (i - z.c) + z.c = i + return // z.b[c:i] + // z.c, i = i, z.c + // return z.b[i:z.c] +} + +func (z *bytesDecReader) readUntil(_ []byte, stop byte) (out []byte) { + return z.readUntilNoInput(stop) +} + +func (z *bytesDecReader) readUntilNoInput(stop byte) (out []byte) { + i := z.c + // if i == len(z.b) { + // panic(io.EOF) + // } + + // Replace loop with goto construct, so that this can be inlined + // for i := z.c; i < blen; i++ { + // if z.b[i] == stop { + // i++ + // out = z.b[z.c:i] + // z.a -= (i - z.c) + // z.c = i + // return + // } + // } +LOOP: + if i < uint(len(z.b)) { + if z.b[i] == stop { + i++ + out = z.b[z.c:i] + // z.a -= (i - z.c) + z.c = i + return + } + i++ + goto LOOP + } + // z.a = 0 + // z.c = blen + panic(io.EOF) +} + +func (z *bytesDecReader) track() { + z.t = z.c +} + +func (z *bytesDecReader) stopTrack() (bs []byte) { + return z.b[z.t:z.c] +} + +// ---------------------------------------- + +// func (d *Decoder) builtin(f *codecFnInfo, rv reflect.Value) { +// d.d.DecodeBuiltin(f.ti.rtid, rv2i(rv)) +// } + +func (d *Decoder) rawExt(f *codecFnInfo, rv reflect.Value) { + d.d.DecodeExt(rv2i(rv), 0, nil) +} + +func (d *Decoder) ext(f *codecFnInfo, rv reflect.Value) { + d.d.DecodeExt(rv2i(rv), f.xfTag, f.xfFn) +} + +func (d *Decoder) selferUnmarshal(f *codecFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecDecodeSelf(d) +} + +func (d *Decoder) binaryUnmarshal(f *codecFnInfo, rv reflect.Value) { + bm := rv2i(rv).(encoding.BinaryUnmarshaler) + xbs := d.d.DecodeBytes(nil, true) + if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil { + panic(fnerr) + } +} + +func (d *Decoder) textUnmarshal(f *codecFnInfo, rv reflect.Value) { + tm := rv2i(rv).(encoding.TextUnmarshaler) + fnerr := tm.UnmarshalText(d.d.DecodeStringAsBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +func (d *Decoder) jsonUnmarshal(f *codecFnInfo, rv reflect.Value) { + tm := rv2i(rv).(jsonUnmarshaler) + // bs := d.d.DecodeBytes(d.b[:], true, true) + // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. + fnerr := tm.UnmarshalJSON(d.nextValueBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +func (d *Decoder) kErr(f *codecFnInfo, rv reflect.Value) { + d.errorf("no decoding function defined for kind %v", rv.Kind()) +} + +// var kIntfCtr uint64 + +func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) { + // nil interface: + // use some heuristics to decode it appropriately + // based on the detected next value in the stream. + n := d.naked() + d.d.DecodeNaked() + if n.v == valueTypeNil { + return + } + // We cannot decode non-nil stream value into nil interface with methods (e.g. io.Reader).
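	// For example (sketch): given var r io.Reader, d.Decode(&r) on a stream
	// holding a naked string must error out here, because a string value
	// cannot satisfy io.Reader's method set.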
+ if f.ti.numMeth > 0 { + d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth) + return + } + // var useRvn bool + switch n.v { + case valueTypeMap: + // if json, default to a map type with string keys + mtid := d.mtid + if mtid == 0 { + if d.jsms { + mtid = mapStrIntfTypId + } else { + mtid = mapIntfIntfTypId + } + } + if mtid == mapIntfIntfTypId { + var v2 map[interface{}]interface{} + d.decode(&v2) + rvn = reflect.ValueOf(&v2).Elem() + } else if mtid == mapStrIntfTypId { // for json performance + var v2 map[string]interface{} + d.decode(&v2) + rvn = reflect.ValueOf(&v2).Elem() + } else { + if d.mtr { + rvn = reflect.New(d.h.MapType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + rvn = reflect.New(d.h.MapType).Elem() + d.decodeValue(rvn, nil, true) + } + } + case valueTypeArray: + if d.stid == 0 || d.stid == intfSliceTypId { + var v2 []interface{} + d.decode(&v2) + rvn = reflect.ValueOf(&v2).Elem() + if d.stid == 0 && d.h.PreferArrayOverSlice { + rvn2 := reflect.New(reflect.ArrayOf(rvn.Len(), intfTyp)).Elem() + reflect.Copy(rvn2, rvn) + rvn = rvn2 + } + } else { + if d.str { + rvn = reflect.New(d.h.SliceType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + rvn = reflect.New(d.h.SliceType).Elem() + d.decodeValue(rvn, nil, true) + } + } + case valueTypeExt: + var v interface{} + tag, bytes := n.u, n.l // calling decode below might taint the values + if bytes == nil { + d.decode(&v) + } + bfn := d.h.getExtForTag(tag) + if bfn == nil { + var re RawExt + re.Tag = tag + re.Data = detachZeroCopyBytes(d.bytes, nil, bytes) + re.Value = v + rvn = reflect.ValueOf(&re).Elem() + } else { + rvnA := reflect.New(bfn.rt) + if bytes != nil { + bfn.ext.ReadExt(rv2i(rvnA), bytes) + } else { + bfn.ext.UpdateExt(rv2i(rvnA), v) + } + rvn = rvnA.Elem() + } + case valueTypeNil: + // no-op + case valueTypeInt: + rvn = n.ri() + case valueTypeUint: + rvn = n.ru() + case valueTypeFloat: + rvn = n.rf() + case valueTypeBool: + rvn = n.rb() + case valueTypeString, valueTypeSymbol: + rvn = n.rs() + case valueTypeBytes: + rvn = n.rl() + case valueTypeTime: + rvn = n.rt() + default: + panicv.errorf("kInterfaceNaked: unexpected valueType: %d", n.v) + } + return +} + +func (d *Decoder) kInterface(f *codecFnInfo, rv reflect.Value) { + // Note: + // A consequence of how kInterface works, is that + // if an interface already contains something, we try + // to decode into what was there before. + // We do not replace with a generic value (as got from decodeNaked). + + // every interface passed here MUST be settable. + var rvn reflect.Value + if rv.IsNil() || d.h.InterfaceReset { + // check if mapping to a type: if so, initialize it and move on + rvn = d.h.intf2impl(f.ti.rtid) + if rvn.IsValid() { + rv.Set(rvn) + } else { + rvn = d.kInterfaceNaked(f) + if rvn.IsValid() { + rv.Set(rvn) + } else if d.h.InterfaceReset { + // reset to zero value based on current type in there. + rv.Set(reflect.Zero(rv.Elem().Type())) + } + return + } + } else { + // now we have a non-nil interface value, meaning it contains a type + rvn = rv.Elem() + } + if d.d.TryDecodeAsNil() { + rv.Set(reflect.Zero(rvn.Type())) + return + } + + // Note: interface{} is settable, but underlying type may not be. + // Consequently, we MAY have to create a decodable value out of the underlying value, + // decode into it, and reset the interface itself. 
+ // fmt.Printf(">>>> kInterface: rvn type: %v, rv type: %v\n", rvn.Type(), rv.Type()) + + rvn2, canDecode := isDecodeable(rvn) + if canDecode { + d.decodeValue(rvn2, nil, true) + return + } + + rvn2 = reflect.New(rvn.Type()).Elem() + rvn2.Set(rvn) + d.decodeValue(rvn2, nil, true) + rv.Set(rvn2) +} + +func decStructFieldKey(dd decDriver, keyType valueType, b *[decScratchByteArrayLen]byte) (rvkencname []byte) { + // use if-else-if, not switch (which compiles to binary-search) + // since keyType is typically valueTypeString, branch prediction is pretty good. + + if keyType == valueTypeString { + rvkencname = dd.DecodeStringAsBytes() + } else if keyType == valueTypeInt { + rvkencname = strconv.AppendInt(b[:0], dd.DecodeInt64(), 10) + } else if keyType == valueTypeUint { + rvkencname = strconv.AppendUint(b[:0], dd.DecodeUint64(), 10) + } else if keyType == valueTypeFloat { + rvkencname = strconv.AppendFloat(b[:0], dd.DecodeFloat64(), 'f', -1, 64) + } else { + rvkencname = dd.DecodeStringAsBytes() + } + return rvkencname +} + +func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) { + fti := f.ti + dd := d.d + elemsep := d.esep + sfn := structFieldNode{v: rv, update: true} + ctyp := dd.ContainerType() + var mf MissingFielder + if fti.mf { + mf = rv2i(rv).(MissingFielder) + } else if fti.mfp { + mf = rv2i(rv.Addr()).(MissingFielder) + } + if ctyp == valueTypeMap { + containerLen := dd.ReadMapStart() + if containerLen == 0 { + dd.ReadMapEnd() + return + } + d.depthIncr() + tisfi := fti.sfiSort + hasLen := containerLen >= 0 + + var rvkencname []byte + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if elemsep { + dd.ReadMapElemKey() + } + rvkencname = decStructFieldKey(dd, fti.keyType, &d.b) + if elemsep { + dd.ReadMapElemValue() + } + if k := fti.indexForEncName(rvkencname); k > -1 { + si := tisfi[k] + if dd.TryDecodeAsNil() { + si.setToZeroValue(rv) + } else { + d.decodeValue(sfn.field(si), nil, true) + } + } else if mf != nil { + // store rvkencname in new []byte, as it previously shares Decoder.b, which is used in decode + name2 := rvkencname + rvkencname = make([]byte, len(rvkencname)) + copy(rvkencname, name2) + + var f interface{} + // xdebugf("kStruct: mf != nil: before decode: rvkencname: %s", rvkencname) + d.decode(&f) + // xdebugf("kStruct: mf != nil: after decode: rvkencname: %s", rvkencname) + if !mf.CodecMissingField(rvkencname, f) && d.h.ErrorIfNoField { + d.errorf("no matching struct field found when decoding stream map with key: %s ", + stringView(rvkencname)) + } + } else { + d.structFieldNotFound(-1, stringView(rvkencname)) + } + // keepAlive4StringView(rvkencnameB) // not needed, as reference is outside loop + } + dd.ReadMapEnd() + d.depthDecr() + } else if ctyp == valueTypeArray { + containerLen := dd.ReadArrayStart() + if containerLen == 0 { + dd.ReadArrayEnd() + return + } + d.depthIncr() + // Not much gain from doing it two ways for array. + // Arrays are not used as much for structs. 
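+		// Editor's illustration (hypothetical type, not upstream code): struct-from-array
+		// decoding assigns stream values by field position. For struct{ A, B int },
+		// the encoded array [7, 8] decodes as A=7, B=8; any extra stream elements
+		// beyond the struct's fields are read and discarded below.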
+ hasLen := containerLen >= 0 + var checkbreak bool + for j, si := range fti.sfiSrc { + if hasLen && j == containerLen { + break + } + if !hasLen && dd.CheckBreak() { + checkbreak = true + break + } + if elemsep { + dd.ReadArrayElem() + } + if dd.TryDecodeAsNil() { + si.setToZeroValue(rv) + } else { + d.decodeValue(sfn.field(si), nil, true) + } + } + if (hasLen && containerLen > len(fti.sfiSrc)) || (!hasLen && !checkbreak) { + // read remaining values and throw away + for j := len(fti.sfiSrc); ; j++ { + if (hasLen && j == containerLen) || (!hasLen && dd.CheckBreak()) { + break + } + if elemsep { + dd.ReadArrayElem() + } + d.structFieldNotFound(j, "") + } + } + dd.ReadArrayEnd() + d.depthDecr() + } else { + d.errorstr(errstrOnlyMapOrArrayCanDecodeIntoStruct) + return + } +} + +func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) { + // A slice can be set from a map or array in stream. + // This way, the order can be kept (as order is lost with map). + ti := f.ti + if f.seq == seqTypeChan && ti.chandir&uint8(reflect.SendDir) == 0 { + d.errorf("receive-only channel cannot be decoded") + } + dd := d.d + rtelem0 := ti.elem + ctyp := dd.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + // you can only decode bytes or string in the stream into a slice or array of bytes + if !(ti.rtid == uint8SliceTypId || rtelem0.Kind() == reflect.Uint8) { + d.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + } + if f.seq == seqTypeChan { + bs2 := dd.DecodeBytes(nil, true) + irv := rv2i(rv) + ch, ok := irv.(chan<- byte) + if !ok { + ch = irv.(chan byte) + } + for _, b := range bs2 { + ch <- b + } + } else { + rvbs := rv.Bytes() + bs2 := dd.DecodeBytes(rvbs, false) + // if rvbs == nil && bs2 != nil || rvbs != nil && bs2 == nil || len(bs2) != len(rvbs) { + if !(len(bs2) > 0 && len(bs2) == len(rvbs) && &bs2[0] == &rvbs[0]) { + if rv.CanSet() { + rv.SetBytes(bs2) + } else if len(rvbs) > 0 && len(bs2) > 0 { + copy(rvbs, bs2) + } + } + } + return + } + + // array := f.seq == seqTypeChan + + slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map) + + // an array can never return a nil slice. so no need to check f.array here. + if containerLenS == 0 { + if rv.CanSet() { + if f.seq == seqTypeSlice { + if rv.IsNil() { + rv.Set(reflect.MakeSlice(ti.rt, 0, 0)) + } else { + rv.SetLen(0) + } + } else if f.seq == seqTypeChan { + if rv.IsNil() { + rv.Set(reflect.MakeChan(ti.rt, 0)) + } + } + } + slh.End() + return + } + + d.depthIncr() + + rtelem0Size := int(rtelem0.Size()) + rtElem0Kind := rtelem0.Kind() + rtelem0Mut := !isImmutableKind(rtElem0Kind) + rtelem := rtelem0 + rtelemkind := rtelem.Kind() + for rtelemkind == reflect.Ptr { + rtelem = rtelem.Elem() + rtelemkind = rtelem.Kind() + } + + var fn *codecFn + + var rvCanset = rv.CanSet() + var rvChanged bool + var rv0 = rv + var rv9 reflect.Value + + rvlen := rv.Len() + rvcap := rv.Cap() + hasLen := containerLenS > 0 + if hasLen && f.seq == seqTypeSlice { + if containerLenS > rvcap { + oldRvlenGtZero := rvlen > 0 + rvlen = decInferLen(containerLenS, d.h.MaxInitLen, int(rtelem0.Size())) + if rvlen <= rvcap { + if rvCanset { + rv.SetLen(rvlen) + } + } else if rvCanset { + rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) + rvcap = rvlen + rvChanged = true + } else { + d.errorf("cannot decode into non-settable slice") + } + if rvChanged && oldRvlenGtZero && !isImmutableKind(rtelem0.Kind()) { + reflect.Copy(rv, rv0) // only copy up to length NOT cap i.e. 
rv0.Slice(0, rvcap) + } + } else if containerLenS != rvlen { + rvlen = containerLenS + if rvCanset { + rv.SetLen(rvlen) + } + // else { + // rv = rv.Slice(0, rvlen) + // rvChanged = true + // d.errorf("cannot decode into non-settable slice") + // } + } + } + + // consider creating new element once, and just decoding into it. + var rtelem0Zero reflect.Value + var rtelem0ZeroValid bool + var decodeAsNil bool + var j int + + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && (f.seq == seqTypeSlice || f.seq == seqTypeChan) && rv.IsNil() { + if hasLen { + rvlen = decInferLen(containerLenS, d.h.MaxInitLen, rtelem0Size) + } else if f.seq == seqTypeSlice { + rvlen = decDefSliceCap + } else { + rvlen = decDefChanCap + } + if rvCanset { + if f.seq == seqTypeSlice { + rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) + rvChanged = true + } else { // chan + rv = reflect.MakeChan(ti.rt, rvlen) + rvChanged = true + } + } else { + d.errorf("cannot decode into non-settable slice") + } + } + slh.ElemContainerState(j) + decodeAsNil = dd.TryDecodeAsNil() + if f.seq == seqTypeChan { + if decodeAsNil { + rv.Send(reflect.Zero(rtelem0)) + continue + } + if rtelem0Mut || !rv9.IsValid() { // || (rtElem0Kind == reflect.Ptr && rv9.IsNil()) { + rv9 = reflect.New(rtelem0).Elem() + } + if fn == nil { + fn = d.h.fn(rtelem, true, true) + } + d.decodeValue(rv9, fn, true) + rv.Send(rv9) + } else { + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= rvlen { + if f.seq == seqTypeArray { + d.arrayCannotExpand(rvlen, j+1) + decodeIntoBlank = true + } else { // if f.seq == seqTypeSlice + // rv = reflect.Append(rv, reflect.Zero(rtelem0)) // append logic + varargs + var rvcap2 int + var rvErrmsg2 string + rv9, rvcap2, rvChanged, rvErrmsg2 = + expandSliceRV(rv, ti.rt, rvCanset, rtelem0Size, 1, rvlen, rvcap) + if rvErrmsg2 != "" { + d.errorf(rvErrmsg2) + } + rvlen++ + if rvChanged { + rv = rv9 + rvcap = rvcap2 + } + } + } + if decodeIntoBlank { + if !decodeAsNil { + d.swallow() + } + } else { + rv9 = rv.Index(j) + if d.h.SliceElementReset || decodeAsNil { + if !rtelem0ZeroValid { + rtelem0ZeroValid = true + rtelem0Zero = reflect.Zero(rtelem0) + } + rv9.Set(rtelem0Zero) + if decodeAsNil { + continue + } + } + + if fn == nil { + fn = d.h.fn(rtelem, true, true) + } + d.decodeValue(rv9, fn, true) + } + } + } + if f.seq == seqTypeSlice { + if j < rvlen { + if rv.CanSet() { + rv.SetLen(j) + } else if rvCanset { + rv = rv.Slice(0, j) + rvChanged = true + } // else { d.errorf("kSlice: cannot change non-settable slice") } + rvlen = j + } else if j == 0 && rv.IsNil() { + if rvCanset { + rv = reflect.MakeSlice(ti.rt, 0, 0) + rvChanged = true + } // else { d.errorf("kSlice: cannot change non-settable slice") } + } + } + slh.End() + + if rvChanged { // infers rvCanset=true, so it can be reset + rv0.Set(rv) + } + + d.depthDecr() +} + +// func (d *Decoder) kArray(f *codecFnInfo, rv reflect.Value) { +// // d.decodeValueFn(rv.Slice(0, rv.Len())) +// f.kSlice(rv.Slice(0, rv.Len())) +// } + +func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) { + dd := d.d + containerLen := dd.ReadMapStart() + elemsep := d.esep + ti := f.ti + if rv.IsNil() { + rvlen := decInferLen(containerLen, d.h.MaxInitLen, int(ti.key.Size()+ti.elem.Size())) + rv.Set(makeMapReflect(ti.rt, rvlen)) + } + + if containerLen == 0 { + dd.ReadMapEnd() + return + } + + d.depthIncr() + + ktype, vtype := ti.key, ti.elem + ktypeId := rt2id(ktype) + vtypeKind := vtype.Kind() + + var keyFn, valFn *codecFn + var 
ktypeLo, vtypeLo reflect.Type
+
+	for ktypeLo = ktype; ktypeLo.Kind() == reflect.Ptr; ktypeLo = ktypeLo.Elem() {
+	}
+
+	for vtypeLo = vtype; vtypeLo.Kind() == reflect.Ptr; vtypeLo = vtypeLo.Elem() {
+	}
+
+	var mapGet, mapSet bool
+	rvvImmut := isImmutableKind(vtypeKind)
+	if !d.h.MapValueReset {
+		// if pointer, mapGet = true
+		// if interface, mapGet = true if !DecodeNakedAlways (else false)
+		// if builtin, mapGet = false
+		// else mapGet = true
+		if vtypeKind == reflect.Ptr {
+			mapGet = true
+		} else if vtypeKind == reflect.Interface {
+			if !d.h.InterfaceReset {
+				mapGet = true
+			}
+		} else if !rvvImmut {
+			mapGet = true
+		}
+	}
+
+	var rvk, rvkp, rvv, rvz reflect.Value
+	rvkMut := !isImmutableKind(ktype.Kind()) // if ktype is immutable, then re-use the same rvk.
+	ktypeIsString := ktypeId == stringTypId
+	ktypeIsIntf := ktypeId == intfTypId
+	hasLen := containerLen > 0
+	var kstrbs []byte
+
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if rvkMut || !rvkp.IsValid() {
+			rvkp = reflect.New(ktype)
+			rvk = rvkp.Elem()
+		}
+		if elemsep {
+			dd.ReadMapElemKey()
+		}
+		// if false && dd.TryDecodeAsNil() { // nil cannot be a map key, so disregard this block
+		// 	// Previously, if a nil key, we just ignored the mapped value and continued.
+		// 	// However, that makes the result of encoding and then decoding map[intf]intf{nil:nil}
+		// 	// to be an empty map.
+		// 	// Instead, we treat a nil key as the zero value of the type.
+		// 	rvk.Set(reflect.Zero(ktype))
+		// } else if ktypeIsString {
+		if ktypeIsString {
+			kstrbs = dd.DecodeStringAsBytes()
+			rvk.SetString(stringView(kstrbs))
+			// NOTE: if doing an insert, you MUST use a real string (not stringview)
+		} else {
+			if keyFn == nil {
+				keyFn = d.h.fn(ktypeLo, true, true)
+			}
+			d.decodeValue(rvk, keyFn, true)
+		}
+		// special case if a byte array.
+		if ktypeIsIntf {
+			if rvk2 := rvk.Elem(); rvk2.IsValid() {
+				if rvk2.Type() == uint8SliceTyp {
+					rvk = reflect.ValueOf(d.string(rvk2.Bytes()))
+				} else {
+					rvk = rvk2
+				}
+			}
+		}
+
+		if elemsep {
+			dd.ReadMapElemValue()
+		}
+
+		// Brittle, but OK per TryDecodeAsNil() contract.
+		// i.e. TryDecodeAsNil never shares slices with other decDriver procedures
+		if dd.TryDecodeAsNil() {
+			if ktypeIsString {
+				rvk.SetString(d.string(kstrbs))
+			}
+			if d.h.DeleteOnNilMapValue {
+				rv.SetMapIndex(rvk, reflect.Value{})
+			} else {
+				rv.SetMapIndex(rvk, reflect.Zero(vtype))
+			}
+			continue
+		}
+
+		mapSet = true // set to false if you do a get, and it's a non-nil pointer
+		if mapGet {
+			// mapGet true only in case where kind=Ptr|Interface or kind is otherwise mutable.
+			rvv = rv.MapIndex(rvk)
+			if !rvv.IsValid() {
+				rvv = reflect.New(vtype).Elem()
+			} else if vtypeKind == reflect.Ptr {
+				if rvv.IsNil() {
+					rvv = reflect.New(vtype).Elem()
+				} else {
+					mapSet = false
+				}
+			} else if vtypeKind == reflect.Interface {
+				// not addressable, and thus not settable.
+				// we MUST create a settable/addressable variant
+				rvv2 := reflect.New(rvv.Type()).Elem()
+				if !rvv.IsNil() {
+					rvv2.Set(rvv)
+				}
+				rvv = rvv2
+			}
+			// else it is ~mutable, and we can just decode into it directly
+		} else if rvvImmut {
+			if !rvz.IsValid() {
+				rvz = reflect.New(vtype).Elem()
+			}
+			rvv = rvz
+		} else {
+			rvv = reflect.New(vtype).Elem()
+		}
+
+		// We MUST be done with the stringview of the key, before decoding the value
+		// so that we don't bastardize the reused byte array.
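+		// Editor's note: kstrbs (from DecodeStringAsBytes) aliases the decoder's
+		// shared scratch buffer, and the value decode below may overwrite that
+		// buffer. d.string(kstrbs) materializes a real (possibly interned) string
+		// for the key before it is inserted into the map.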
+ if mapSet && ktypeIsString { + rvk.SetString(d.string(kstrbs)) + } + if valFn == nil { + valFn = d.h.fn(vtypeLo, true, true) + } + d.decodeValue(rvv, valFn, true) + // d.decodeValueFn(rvv, valFn) + if mapSet { + rv.SetMapIndex(rvk, rvv) + } + // if ktypeIsString { + // // keepAlive4StringView(kstrbs) // not needed, as reference is outside loop + // } + } + + dd.ReadMapEnd() + + d.depthDecr() +} + +// decNaked is used to keep track of the primitives decoded. +// Without it, we would have to decode each primitive and wrap it +// in an interface{}, causing an allocation. +// In this model, the primitives are decoded in a "pseudo-atomic" fashion, +// so we can rest assured that no other decoding happens while these +// primitives are being decoded. +// +// maps and arrays are not handled by this mechanism. +// However, RawExt is, and we accommodate for extensions that decode +// RawExt from DecodeNaked, but need to decode the value subsequently. +// kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat. +// +// However, decNaked also keeps some arrays of default maps and slices +// used in DecodeNaked. This way, we can get a pointer to it +// without causing a new heap allocation. +// +// kInterfaceNaked will ensure that there is no allocation for the common +// uses. + +type decNaked struct { + // r RawExt // used for RawExt, uint, []byte. + + // primitives below + u uint64 + i int64 + f float64 + l []byte + s string + + // ---- cpu cache line boundary? + t time.Time + b bool + + // state + v valueType + _ [6]bool // padding + + // ru, ri, rf, rl, rs, rb, rt reflect.Value // mapping to the primitives above + // + // _ [3]uint64 // padding +} + +// func (n *decNaked) init() { +// n.ru = reflect.ValueOf(&n.u).Elem() +// n.ri = reflect.ValueOf(&n.i).Elem() +// n.rf = reflect.ValueOf(&n.f).Elem() +// n.rl = reflect.ValueOf(&n.l).Elem() +// n.rs = reflect.ValueOf(&n.s).Elem() +// n.rt = reflect.ValueOf(&n.t).Elem() +// n.rb = reflect.ValueOf(&n.b).Elem() +// // n.rr[] = reflect.ValueOf(&n.) +// } + +// type decNakedPooler struct { +// n *decNaked +// nsp *sync.Pool +// } + +// // naked must be called before each call to .DecodeNaked, as they will use it. +// func (d *decNakedPooler) naked() *decNaked { +// if d.n == nil { +// // consider one of: +// // - get from sync.Pool (if GC is frequent, there's no value here) +// // - new alloc (safest. only init'ed if it a naked decode will be done) +// // - field in Decoder (makes the Decoder struct very big) +// // To support using a decoder where a DecodeNaked is not needed, +// // we prefer #1 or #2. +// // d.n = new(decNaked) // &d.nv // new(decNaked) // grab from a sync.Pool +// // d.n.init() +// var v interface{} +// d.nsp, v = pool.decNaked() +// d.n = v.(*decNaked) +// } +// return d.n +// } + +// func (d *decNakedPooler) end() { +// if d.n != nil { +// // if n != nil, then nsp != nil (they are always set together) +// d.nsp.Put(d.n) +// d.n, d.nsp = nil, nil +// } +// } + +// type rtid2rv struct { +// rtid uintptr +// rv reflect.Value +// } + +// -------------- + +type decReaderSwitch struct { + rb bytesDecReader + // ---- cpu cache line boundary? + ri *ioDecReader + bi *bufioDecReader + + mtr, str bool // whether maptype or slicetype are known types + + be bool // is binary encoding + js bool // is json handle + jsms bool // is json handle, and MapKeyAsString + esep bool // has elem separators + + // typ entryType + bytes bool // is bytes reader + bufio bool // is this a bufioDecReader? 
+} + +// numread, track and stopTrack are always inlined, as they just check int fields, etc. + +/* +func (z *decReaderSwitch) numread() int { + switch z.typ { + case entryTypeBytes: + return z.rb.numread() + case entryTypeIo: + return z.ri.numread() + default: + return z.bi.numread() + } +} +func (z *decReaderSwitch) track() { + switch z.typ { + case entryTypeBytes: + z.rb.track() + case entryTypeIo: + z.ri.track() + default: + z.bi.track() + } +} +func (z *decReaderSwitch) stopTrack() []byte { + switch z.typ { + case entryTypeBytes: + return z.rb.stopTrack() + case entryTypeIo: + return z.ri.stopTrack() + default: + return z.bi.stopTrack() + } +} + +func (z *decReaderSwitch) unreadn1() { + switch z.typ { + case entryTypeBytes: + z.rb.unreadn1() + case entryTypeIo: + z.ri.unreadn1() + default: + z.bi.unreadn1() + } +} +func (z *decReaderSwitch) readx(n int) []byte { + switch z.typ { + case entryTypeBytes: + return z.rb.readx(n) + case entryTypeIo: + return z.ri.readx(n) + default: + return z.bi.readx(n) + } +} +func (z *decReaderSwitch) readb(s []byte) { + switch z.typ { + case entryTypeBytes: + z.rb.readb(s) + case entryTypeIo: + z.ri.readb(s) + default: + z.bi.readb(s) + } +} +func (z *decReaderSwitch) readn1() uint8 { + switch z.typ { + case entryTypeBytes: + return z.rb.readn1() + case entryTypeIo: + return z.ri.readn1() + default: + return z.bi.readn1() + } +} +func (z *decReaderSwitch) skip(accept *bitset256) (token byte) { + switch z.typ { + case entryTypeBytes: + return z.rb.skip(accept) + case entryTypeIo: + return z.ri.skip(accept) + default: + return z.bi.skip(accept) + } +} +func (z *decReaderSwitch) readTo(in []byte, accept *bitset256) (out []byte) { + switch z.typ { + case entryTypeBytes: + return z.rb.readTo(in, accept) + case entryTypeIo: + return z.ri.readTo(in, accept) + default: + return z.bi.readTo(in, accept) + } +} +func (z *decReaderSwitch) readUntil(in []byte, stop byte) (out []byte) { + switch z.typ { + case entryTypeBytes: + return z.rb.readUntil(in, stop) + case entryTypeIo: + return z.ri.readUntil(in, stop) + default: + return z.bi.readUntil(in, stop) + } +} + +*/ + +// the if/else-if/else block is expensive to inline. +// Each node of this construct costs a lot and dominates the budget. +// Best to only do an if fast-path else block (so fast-path is inlined). +// This is irrespective of inlineExtraCallCost set in $GOROOT/src/cmd/compile/internal/gc/inl.go +// +// In decReaderSwitch methods below, we delegate all IO functions into their own methods. +// This allows for the inlining of the common path when z.bytes=true. +// Go 1.12+ supports inlining methods with up to 1 inlined function (or 2 if no other constructs). 
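+//
+// Editor's sketch of the two shapes (illustrative only; exact inlining
+// behavior depends on the compiler version):
+//
+//	if fast { return a() } // single test + call: fits the inlining budget
+//	return slowPath()      // all rare cases hidden behind one method call
+//
+// versus an if/else-if/else chain, whose extra nodes typically push the
+// caller over the inlining budget.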
+
+func (z *decReaderSwitch) numread() uint {
+	if z.bytes {
+		return z.rb.numread()
+	} else if z.bufio {
+		return z.bi.numread()
+	} else {
+		return z.ri.numread()
+	}
+}
+func (z *decReaderSwitch) track() {
+	if z.bytes {
+		z.rb.track()
+	} else if z.bufio {
+		z.bi.track()
+	} else {
+		z.ri.track()
+	}
+}
+func (z *decReaderSwitch) stopTrack() []byte {
+	if z.bytes {
+		return z.rb.stopTrack()
+	} else if z.bufio {
+		return z.bi.stopTrack()
+	} else {
+		return z.ri.stopTrack()
+	}
+}
+
+// func (z *decReaderSwitch) unreadn1() {
+// 	if z.bytes {
+// 		z.rb.unreadn1()
+// 	} else {
+// 		z.unreadn1IO()
+// 	}
+// }
+// func (z *decReaderSwitch) unreadn1IO() {
+// 	if z.bufio {
+// 		z.bi.unreadn1()
+// 	} else {
+// 		z.ri.unreadn1()
+// 	}
+// }
+
+func (z *decReaderSwitch) unreadn1() {
+	if z.bytes {
+		z.rb.unreadn1()
+	} else if z.bufio {
+		z.bi.unreadn1()
+	} else {
+		z.ri.unreadn1() // not inlined
+	}
+}
+
+func (z *decReaderSwitch) readx(n uint) []byte {
+	if z.bytes {
+		return z.rb.readx(n)
+	}
+	return z.readxIO(n)
+}
+func (z *decReaderSwitch) readxIO(n uint) []byte {
+	if z.bufio {
+		return z.bi.readx(n)
+	}
+	return z.ri.readx(n)
+}
+
+func (z *decReaderSwitch) readb(s []byte) {
+	if z.bytes {
+		z.rb.readb(s)
+	} else {
+		z.readbIO(s)
+	}
+}
+
+//go:noinline - fallback for io, ensures z.bytes path is inlined
+func (z *decReaderSwitch) readbIO(s []byte) {
+	if z.bufio {
+		z.bi.readb(s)
+	} else {
+		z.ri.readb(s)
+	}
+}
+
+func (z *decReaderSwitch) readn1() uint8 {
+	if z.bytes {
+		return z.rb.readn1()
+	}
+	return z.readn1IO()
+}
+func (z *decReaderSwitch) readn1IO() uint8 {
+	if z.bufio {
+		return z.bi.readn1()
+	}
+	return z.ri.readn1()
+}
+
+func (z *decReaderSwitch) skip(accept *bitset256) (token byte) {
+	if z.bytes {
+		return z.rb.skip(accept)
+	}
+	return z.skipIO(accept)
+}
+func (z *decReaderSwitch) skipIO(accept *bitset256) (token byte) {
+	if z.bufio {
+		return z.bi.skip(accept)
+	}
+	return z.ri.skip(accept)
+}
+
+func (z *decReaderSwitch) readTo(in []byte, accept *bitset256) (out []byte) {
+	if z.bytes {
+		return z.rb.readToNoInput(accept) // z.rb.readTo(in, accept)
+	}
+	return z.readToIO(in, accept)
+}
+
+//go:noinline - fallback for io, ensures z.bytes path is inlined
+func (z *decReaderSwitch) readToIO(in []byte, accept *bitset256) (out []byte) {
+	if z.bufio {
+		return z.bi.readTo(in, accept)
+	}
+	return z.ri.readTo(in, accept)
+}
+func (z *decReaderSwitch) readUntil(in []byte, stop byte) (out []byte) {
+	if z.bytes {
+		return z.rb.readUntilNoInput(stop)
+	}
+	return z.readUntilIO(in, stop)
+}
+
+func (z *decReaderSwitch) readUntilIO(in []byte, stop byte) (out []byte) {
+	if z.bufio {
+		return z.bi.readUntil(in, stop)
+	}
+	return z.ri.readUntil(in, stop)
+}
+
+// Decoder reads and decodes an object from an input stream in a supported format.
+//
+// Decoder is NOT safe for concurrent use i.e. a Decoder cannot be used
+// concurrently in multiple goroutines.
+//
+// However, as Decoder could be allocation heavy to initialize, a Reset method is provided
+// so its state can be reused to decode new input streams repeatedly.
+// This is the idiomatic way to use it.
+type Decoder struct {
+	panicHdl
+	// hopefully, reduce dereferencing cost by laying the decReader inside the Decoder.
+	// Try to put things that go together to fit within a cache line (8 words).
+
+	d decDriver
+
+	// NOTE: Decoder shouldn't call its read methods,
+	// as the handler MAY need to do some coordination.
+ r *decReaderSwitch + + // bi *bufioDecReader + // cache the mapTypeId and sliceTypeId for faster comparisons + mtid uintptr + stid uintptr + + hh Handle + h *BasicHandle + + // ---- cpu cache line boundary? + decReaderSwitch + + // ---- cpu cache line boundary? + n decNaked + + // cr containerStateRecv + err error + + depth int16 + maxdepth int16 + + _ [4]uint8 // padding + + is map[string]string // used for interning strings + + // ---- cpu cache line boundary? + b [decScratchByteArrayLen]byte // scratch buffer, used by Decoder and xxxEncDrivers + + // padding - false sharing help // modify 232 if Decoder struct changes. + // _ [cacheLineSize - 232%cacheLineSize]byte +} + +// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader. +// +// For efficiency, Users are encouraged to configure ReaderBufferSize on the handle +// OR pass in a memory buffered reader (eg bufio.Reader, bytes.Buffer). +func NewDecoder(r io.Reader, h Handle) *Decoder { + d := newDecoder(h) + d.Reset(r) + return d +} + +// NewDecoderBytes returns a Decoder which efficiently decodes directly +// from a byte slice with zero copying. +func NewDecoderBytes(in []byte, h Handle) *Decoder { + d := newDecoder(h) + d.ResetBytes(in) + return d +} + +// var defaultDecNaked decNaked + +func newDecoder(h Handle) *Decoder { + d := &Decoder{h: basicHandle(h), err: errDecoderNotInitialized} + d.bytes = true + if useFinalizers { + runtime.SetFinalizer(d, (*Decoder).finalize) + // xdebugf(">>>> new(Decoder) with finalizer") + } + d.r = &d.decReaderSwitch + d.hh = h + d.be = h.isBinary() + // NOTE: do not initialize d.n here. It is lazily initialized in d.naked() + var jh *JsonHandle + jh, d.js = h.(*JsonHandle) + if d.js { + d.jsms = jh.MapKeyAsString + } + d.esep = d.hh.hasElemSeparators() + if d.h.InternString { + d.is = make(map[string]string, 32) + } + d.d = h.newDecDriver(d) + // d.cr, _ = d.d.(containerStateRecv) + return d +} + +func (d *Decoder) resetCommon() { + // d.r = &d.decReaderSwitch + d.d.reset() + d.err = nil + d.depth = 0 + d.maxdepth = d.h.MaxDepth + if d.maxdepth <= 0 { + d.maxdepth = decDefMaxDepth + } + // reset all things which were cached from the Handle, but could change + d.mtid, d.stid = 0, 0 + d.mtr, d.str = false, false + if d.h.MapType != nil { + d.mtid = rt2id(d.h.MapType) + d.mtr = fastpathAV.index(d.mtid) != -1 + } + if d.h.SliceType != nil { + d.stid = rt2id(d.h.SliceType) + d.str = fastpathAV.index(d.stid) != -1 + } +} + +// Reset the Decoder with a new Reader to decode from, +// clearing all state from last run(s). +func (d *Decoder) Reset(r io.Reader) { + if r == nil { + return + } + d.bytes = false + // d.typ = entryTypeUnset + if d.h.ReaderBufferSize > 0 { + if d.bi == nil { + d.bi = new(bufioDecReader) + } + d.bi.reset(r, d.h.ReaderBufferSize) + // d.r = d.bi + // d.typ = entryTypeBufio + d.bufio = true + } else { + // d.ri.x = &d.b + // d.s = d.sa[:0] + if d.ri == nil { + d.ri = new(ioDecReader) + } + d.ri.reset(r) + // d.r = d.ri + // d.typ = entryTypeIo + d.bufio = false + } + d.resetCommon() +} + +// ResetBytes resets the Decoder with a new []byte to decode from, +// clearing all state from last run(s). +func (d *Decoder) ResetBytes(in []byte) { + if in == nil { + return + } + d.bytes = true + d.bufio = false + // d.typ = entryTypeBytes + d.rb.reset(in) + // d.r = &d.rb + d.resetCommon() +} + +func (d *Decoder) naked() *decNaked { + return &d.n +} + +// Decode decodes the stream from reader and stores the result in the +// value pointed to by v. 
v cannot be a nil pointer. v can also be +// a reflect.Value of a pointer. +// +// Note that a pointer to a nil interface is not a nil pointer. +// If you do not know what type of stream it is, pass in a pointer to a nil interface. +// We will decode and store a value in that nil interface. +// +// Sample usages: +// +// // Decoding into a non-nil typed value +// var f float32 +// err = codec.NewDecoder(r, handle).Decode(&f) +// +// // Decoding into nil interface +// var v interface{} +// dec := codec.NewDecoder(r, handle) +// err = dec.Decode(&v) +// +// When decoding into a nil interface{}, we will decode into an appropriate value based +// on the contents of the stream: +// - Numbers are decoded as float64, int64 or uint64. +// - Other values are decoded appropriately depending on the type: +// bool, string, []byte, time.Time, etc +// - Extensions are decoded as RawExt (if no ext function registered for the tag) +// +// Configurations exist on the Handle to override defaults +// (e.g. for MapType, SliceType and how to decode raw bytes). +// +// When decoding into a non-nil interface{} value, the mode of encoding is based on the +// type of the value. When a value is seen: +// - If an extension is registered for it, call that extension function +// - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error +// - Else decode it based on its reflect.Kind +// +// There are some special rules when decoding into containers (slice/array/map/struct). +// Decode will typically use the stream contents to UPDATE the container i.e. the values +// in these containers will not be zero'ed before decoding. +// - A map can be decoded from a stream map, by updating matching keys. +// - A slice can be decoded from a stream array, +// by updating the first n elements, where n is length of the stream. +// - A slice can be decoded from a stream map, by decoding as if +// it contains a sequence of key-value pairs. +// - A struct can be decoded from a stream map, by updating matching fields. +// - A struct can be decoded from a stream array, +// by updating fields as they occur in the struct (by index). +// +// This in-place update maintains consistency in the decoding philosophy (i.e. we ALWAYS update +// in place by default). However, the consequence of this is that values in slices or maps +// which are not zero'ed before hand, will have part of the prior values in place after decode +// if the stream doesn't contain an update for those parts. +// +// This in-place update can be disabled by configuring the MapValueReset and SliceElementReset +// decode options available on every handle. +// +// Furthermore, when decoding a stream map or array with length of 0 into a nil map or slice, +// we reset the destination map or slice to a zero-length value. +// +// However, when decoding a stream nil, we reset the destination container +// to its "zero" value (e.g. nil for slice/map, etc). +// +// Note: we allow nil values in the stream anywhere except for map keys. +// A nil value in the encoded stream where a map key is expected is treated as an error. +func (d *Decoder) Decode(v interface{}) (err error) { + // tried to use closure, as runtime optimizes defer with no params. + // This seemed to be causing weird issues (like circular reference found, unexpected panic, etc). 
+	// Also, see https://github.com/golang/go/issues/14939#issuecomment-417836139
+	// defer func() { d.deferred(&err) }()
+	// { x, y := d, &err; defer func() { x.deferred(y) }() }
+	if d.err != nil {
+		return d.err
+	}
+	if recoverPanicToErr {
+		defer func() {
+			if x := recover(); x != nil {
+				panicValToErr(d, x, &d.err)
+				err = d.err
+			}
+		}()
+	}
+
+	// defer d.deferred(&err)
+	d.mustDecode(v)
+	return
+}
+
+// MustDecode is like Decode, but panics if unable to Decode.
+// This provides insight into the code location that triggered the error.
+func (d *Decoder) MustDecode(v interface{}) {
+	if d.err != nil {
+		panic(d.err)
+	}
+	d.mustDecode(v)
+}
+
+// mustDecode is like Decode, but panics if unable to Decode.
+// This provides insight into the code location that triggered the error.
+func (d *Decoder) mustDecode(v interface{}) {
+	// TODO: Top-level: ensure that v is a pointer and not nil.
+	if d.d.TryDecodeAsNil() {
+		setZero(v)
+		return
+	}
+	if d.bi == nil {
+		d.decode(v)
+		return
+	}
+
+	d.bi.calls++
+	d.decode(v)
+	// xprintf.(">>>>>>>> >>>>>>>> num decFns: %v\n", d.cf.sn)
+	d.bi.calls--
+	if !d.h.ExplicitRelease && d.bi.calls == 0 {
+		d.bi.release()
+	}
+}
+
+// func (d *Decoder) deferred(err1 *error) {
+// 	if recoverPanicToErr {
+// 		if x := recover(); x != nil {
+// 			panicValToErr(d, x, err1)
+// 			panicValToErr(d, x, &d.err)
+// 		}
+// 	}
+// }
+
+//go:noinline -- as it is run by finalizer
+func (d *Decoder) finalize() {
+	// xdebugf("finalizing Decoder")
+	d.Release()
+}
+
+// Release releases shared (pooled) resources.
+//
+// It is important to call Release() when done with a Decoder, so those resources
+// are released instantly for use by subsequently created Decoders.
+//
+// By default, Release() is automatically called unless the option ExplicitRelease is set.
+func (d *Decoder) Release() {
+	if d.bi != nil {
+		d.bi.release()
+	}
+	// d.decNakedPooler.end()
+}
+
+// // this is not a smart swallow, as it allocates objects and does unnecessary work.
+// func (d *Decoder) swallowViaHammer() {
+// 	var blank interface{}
+// 	d.decodeValueNoFn(reflect.ValueOf(&blank).Elem())
+// }
+
+func (d *Decoder) swallow() {
+	// smarter decode that just swallows the content
+	dd := d.d
+	if dd.TryDecodeAsNil() {
+		return
+	}
+	elemsep := d.esep
+	switch dd.ContainerType() {
+	case valueTypeMap:
+		containerLen := dd.ReadMapStart()
+		d.depthIncr()
+		hasLen := containerLen >= 0
+		for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+			// if clenGtEqualZero {if j >= containerLen {break} } else if dd.CheckBreak() {break}
+			if elemsep {
+				dd.ReadMapElemKey()
+			}
+			d.swallow()
+			if elemsep {
+				dd.ReadMapElemValue()
+			}
+			d.swallow()
+		}
+		dd.ReadMapEnd()
+		d.depthDecr()
+	case valueTypeArray:
+		containerLen := dd.ReadArrayStart()
+		d.depthIncr()
+		hasLen := containerLen >= 0
+		for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+			if elemsep {
+				dd.ReadArrayElem()
+			}
+			d.swallow()
+		}
+		dd.ReadArrayEnd()
+		d.depthDecr()
+	case valueTypeBytes:
+		dd.DecodeBytes(d.b[:], true)
+	case valueTypeString:
+		dd.DecodeStringAsBytes()
+	default:
+		// these are all primitives, which we can get from decodeNaked
+		// if RawExt using Value, complete the processing.
+ n := d.naked() + dd.DecodeNaked() + if n.v == valueTypeExt && n.l == nil { + var v2 interface{} + d.decode(&v2) + } + } +} + +func setZero(iv interface{}) { + if iv == nil || definitelyNil(iv) { + return + } + var canDecode bool + switch v := iv.(type) { + case *string: + *v = "" + case *bool: + *v = false + case *int: + *v = 0 + case *int8: + *v = 0 + case *int16: + *v = 0 + case *int32: + *v = 0 + case *int64: + *v = 0 + case *uint: + *v = 0 + case *uint8: + *v = 0 + case *uint16: + *v = 0 + case *uint32: + *v = 0 + case *uint64: + *v = 0 + case *float32: + *v = 0 + case *float64: + *v = 0 + case *[]uint8: + *v = nil + case *Raw: + *v = nil + case *time.Time: + *v = time.Time{} + case reflect.Value: + if v, canDecode = isDecodeable(v); canDecode && v.CanSet() { + v.Set(reflect.Zero(v.Type())) + } // TODO: else drain if chan, clear if map, set all to nil if slice??? + default: + if !fastpathDecodeSetZeroTypeSwitch(iv) { + v := reflect.ValueOf(iv) + if v, canDecode = isDecodeable(v); canDecode && v.CanSet() { + v.Set(reflect.Zero(v.Type())) + } // TODO: else drain if chan, clear if map, set all to nil if slice??? + } + } +} + +func (d *Decoder) decode(iv interface{}) { + // a switch with only concrete types can be optimized. + // consequently, we deal with nil and interfaces outside the switch. + + if iv == nil { + d.errorstr(errstrCannotDecodeIntoNil) + return + } + + switch v := iv.(type) { + // case nil: + // case Selfer: + case reflect.Value: + v = d.ensureDecodeable(v) + d.decodeValue(v, nil, true) + + case *string: + *v = d.d.DecodeString() + case *bool: + *v = d.d.DecodeBool() + case *int: + *v = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + case *int8: + *v = int8(chkOvf.IntV(d.d.DecodeInt64(), 8)) + case *int16: + *v = int16(chkOvf.IntV(d.d.DecodeInt64(), 16)) + case *int32: + *v = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + case *int64: + *v = d.d.DecodeInt64() + case *uint: + *v = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) + case *uint8: + *v = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + case *uint16: + *v = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16)) + case *uint32: + *v = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32)) + case *uint64: + *v = d.d.DecodeUint64() + case *float32: + f64 := d.d.DecodeFloat64() + if chkOvf.Float32(f64) { + d.errorf("float32 overflow: %v", f64) + } + *v = float32(f64) + case *float64: + *v = d.d.DecodeFloat64() + case *[]uint8: + *v = d.d.DecodeBytes(*v, false) + case []uint8: + b := d.d.DecodeBytes(v, false) + if !(len(b) > 0 && len(b) == len(v) && &b[0] == &v[0]) { + copy(v, b) + } + case *time.Time: + *v = d.d.DecodeTime() + case *Raw: + *v = d.rawBytes() + + case *interface{}: + d.decodeValue(reflect.ValueOf(iv).Elem(), nil, true) + // d.decodeValueNotNil(reflect.ValueOf(iv).Elem()) + + default: + if v, ok := iv.(Selfer); ok { + v.CodecDecodeSelf(d) + } else if !fastpathDecodeTypeSwitch(iv, d) { + v := reflect.ValueOf(iv) + v = d.ensureDecodeable(v) + d.decodeValue(v, nil, false) + // d.decodeValueFallback(v) + } + } +} + +func (d *Decoder) decodeValue(rv reflect.Value, fn *codecFn, chkAll bool) { + // If stream is not containing a nil value, then we can deref to the base + // non-pointer value, and decode into that. 
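+	// (Editor's example: for a ***T whose inner pointers are nil, the loop
+	// below allocates each nil level; rv ends up as the addressable T, and rvp
+	// keeps the innermost pointer for decode functions that need an address.)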
+	var rvp reflect.Value
+	var rvpValid bool
+	if rv.Kind() == reflect.Ptr {
+		rvpValid = true
+		for {
+			if rv.IsNil() {
+				rv.Set(reflect.New(rv.Type().Elem()))
+			}
+			rvp = rv
+			rv = rv.Elem()
+			if rv.Kind() != reflect.Ptr {
+				break
+			}
+		}
+	}
+
+	if fn == nil {
+		// always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer
+		fn = d.h.fn(rv.Type(), chkAll, true) // chkAll, chkAll)
+	}
+	if fn.i.addrD {
+		if rvpValid {
+			fn.fd(d, &fn.i, rvp)
+		} else if rv.CanAddr() {
+			fn.fd(d, &fn.i, rv.Addr())
+		} else if !fn.i.addrF {
+			fn.fd(d, &fn.i, rv)
+		} else {
+			d.errorf("cannot decode into a non-pointer value")
+		}
+	} else {
+		fn.fd(d, &fn.i, rv)
+	}
+	// return rv
+}
+
+func (d *Decoder) structFieldNotFound(index int, rvkencname string) {
+	// NOTE: rvkencname may be a stringView, so don't pass it to another function.
+	if d.h.ErrorIfNoField {
+		if index >= 0 {
+			d.errorf("no matching struct field found when decoding stream array at index %v", index)
+			return
+		} else if rvkencname != "" {
+			d.errorf("no matching struct field found when decoding stream map with key " + rvkencname)
+			return
+		}
+	}
+	d.swallow()
+}
+
+func (d *Decoder) arrayCannotExpand(sliceLen, streamLen int) {
+	if d.h.ErrorIfNoArrayExpand {
+		d.errorf("cannot expand array len during decode from %v to %v", sliceLen, streamLen)
+	}
+}
+
+func isDecodeable(rv reflect.Value) (rv2 reflect.Value, canDecode bool) {
+	switch rv.Kind() {
+	case reflect.Array:
+		return rv, rv.CanAddr()
+	case reflect.Ptr:
+		if !rv.IsNil() {
+			return rv.Elem(), true
+		}
+	case reflect.Slice, reflect.Chan, reflect.Map:
+		if !rv.IsNil() {
+			return rv, true
+		}
+	}
+	return
+}
+
+func (d *Decoder) ensureDecodeable(rv reflect.Value) (rv2 reflect.Value) {
+	// decode can take any reflect.Value that is inherently addressable i.e.
+	// - array
+	// - non-nil chan (we will SEND to it)
+	// - non-nil slice (we will set its elements)
+	// - non-nil map (we will put into it)
+	// - non-nil pointer (we can "update" it)
+	rv2, canDecode := isDecodeable(rv)
+	if canDecode {
+		return
+	}
+	if !rv.IsValid() {
+		d.errorstr(errstrCannotDecodeIntoNil)
+		return
+	}
+	if !rv.CanInterface() {
+		d.errorf("cannot decode into a value without an interface: %v", rv)
+		return
+	}
+	rvi := rv2i(rv)
+	rvk := rv.Kind()
+	d.errorf("cannot decode into value of kind: %v, type: %T, %v", rvk, rvi, rvi)
+	return
+}
+
+func (d *Decoder) depthIncr() {
+	d.depth++
+	if d.depth >= d.maxdepth {
+		panic(errMaxDepthExceeded)
+	}
+}
+
+func (d *Decoder) depthDecr() {
+	d.depth--
+}
+
+// Possibly get an interned version of a string
+//
+// This should mostly be used for map keys, where the key type is string.
+// This is because keys of a map/struct are typically reused across many objects.
+func (d *Decoder) string(v []byte) (s string) {
+	if d.is == nil {
+		return string(v) // don't return stringView, as we need a real string here.
+	}
+	s, ok := d.is[string(v)] // no allocation here, per go implementation
+	if !ok {
+		s = string(v) // new allocation here
+		d.is[s] = s
+	}
+	return s
+}
+
+// nextValueBytes returns the next value in the stream as a set of bytes.
+func (d *Decoder) nextValueBytes() (bs []byte) {
+	d.d.uncacheRead()
+	d.r.track()
+	d.swallow()
+	bs = d.r.stopTrack()
+	return
+}
+
+func (d *Decoder) rawBytes() []byte {
+	// ensure that this is not a view into the bytes
+	// i.e. make new copy always.
+ bs := d.nextValueBytes() + bs2 := make([]byte, len(bs)) + copy(bs2, bs) + return bs2 +} + +func (d *Decoder) wrapErr(v interface{}, err *error) { + *err = decodeError{codecError: codecError{name: d.hh.Name(), err: v}, pos: int(d.r.numread())} +} + +// NumBytesRead returns the number of bytes read +func (d *Decoder) NumBytesRead() int { + return int(d.r.numread()) +} + +// -------------------------------------------------- + +// decSliceHelper assists when decoding into a slice, from a map or an array in the stream. +// A slice can be set from a map or array in stream. This supports the MapBySlice interface. +type decSliceHelper struct { + d *Decoder + // ct valueType + array bool +} + +func (d *Decoder) decSliceHelperStart() (x decSliceHelper, clen int) { + dd := d.d + ctyp := dd.ContainerType() + switch ctyp { + case valueTypeArray: + x.array = true + clen = dd.ReadArrayStart() + case valueTypeMap: + clen = dd.ReadMapStart() * 2 + default: + d.errorf("only encoded map or array can be decoded into a slice (%d)", ctyp) + } + // x.ct = ctyp + x.d = d + return +} + +func (x decSliceHelper) End() { + if x.array { + x.d.d.ReadArrayEnd() + } else { + x.d.d.ReadMapEnd() + } +} + +func (x decSliceHelper) ElemContainerState(index int) { + if x.array { + x.d.d.ReadArrayElem() + } else if index%2 == 0 { + x.d.d.ReadMapElemKey() + } else { + x.d.d.ReadMapElemValue() + } +} + +func decByteSlice(r *decReaderSwitch, clen, maxInitLen int, bs []byte) (bsOut []byte) { + if clen == 0 { + return zeroByteSlice + } + if len(bs) == clen { + bsOut = bs + r.readb(bsOut) + } else if cap(bs) >= clen { + bsOut = bs[:clen] + r.readb(bsOut) + } else { + // bsOut = make([]byte, clen) + len2 := decInferLen(clen, maxInitLen, 1) + bsOut = make([]byte, len2) + r.readb(bsOut) + for len2 < clen { + len3 := decInferLen(clen-len2, maxInitLen, 1) + bs3 := bsOut + bsOut = make([]byte, len2+len3) + copy(bsOut, bs3) + r.readb(bsOut[len2:]) + len2 += len3 + } + } + return +} + +// func decByteSliceZeroCopy(r decReader, clen, maxInitLen int, bs []byte) (bsOut []byte) { +// if _, ok := r.(*bytesDecReader); ok && clen <= maxInitLen { +// return r.readx(clen) +// } +// return decByteSlice(r, clen, maxInitLen, bs) +// } + +func detachZeroCopyBytes(isBytesReader bool, dest []byte, in []byte) (out []byte) { + if xlen := len(in); xlen > 0 { + if isBytesReader || xlen <= scratchByteArrayLen { + if cap(dest) >= xlen { + out = dest[:xlen] + } else { + out = make([]byte, xlen) + } + copy(out, in) + return + } + } + return in +} + +// decInferLen will infer a sensible length, given the following: +// - clen: length wanted. +// - maxlen: max length to be returned. +// if <= 0, it is unset, and we infer it based on the unit size +// - unit: number of bytes for each element of the collection +func decInferLen(clen, maxlen, unit int) (rvlen int) { + // handle when maxlen is not set i.e. <= 0 + if clen <= 0 { + return + } + if unit == 0 { + return clen + } + if maxlen <= 0 { + // no maxlen defined. Use maximum of 256K memory, with a floor of 4K items. 
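+		// Worked example (editor's note, assuming maxlen unset): clen=1000000 and
+		// unit=8 gives maxlen = 256*1024/8 = 32768, so rvlen is capped at 32768;
+		// with unit=128 (>= 64), the 4K floor applies and maxlen = 4096.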
+ // maxlen = 256 * 1024 / unit + // if maxlen < (4 * 1024) { + // maxlen = 4 * 1024 + // } + if unit < (256 / 4) { + maxlen = 256 * 1024 / unit + } else { + maxlen = 4 * 1024 + } + } + if clen > maxlen { + rvlen = maxlen + } else { + rvlen = clen + } + return +} + +func expandSliceRV(s reflect.Value, st reflect.Type, canChange bool, stElemSize, num, slen, scap int) ( + s2 reflect.Value, scap2 int, changed bool, err string) { + l1 := slen + num // new slice length + if l1 < slen { + err = errmsgExpandSliceOverflow + return + } + if l1 <= scap { + if s.CanSet() { + s.SetLen(l1) + } else if canChange { + s2 = s.Slice(0, l1) + scap2 = scap + changed = true + } else { + err = errmsgExpandSliceCannotChange + return + } + return + } + if !canChange { + err = errmsgExpandSliceCannotChange + return + } + scap2 = growCap(scap, stElemSize, num) + s2 = reflect.MakeSlice(st, l1, scap2) + changed = true + reflect.Copy(s2, s) + return +} + +func decReadFull(r io.Reader, bs []byte) (n uint, err error) { + var nn int + for n < uint(len(bs)) && err == nil { + nn, err = r.Read(bs[n:]) + if nn > 0 { + if err == io.EOF { + // leave EOF for next time + err = nil + } + n += uint(nn) + } + } + // xdebugf("decReadFull: len(bs): %v, n: %v, err: %v", len(bs), n, err) + // do not do this - it serves no purpose + // if n != len(bs) && err == io.EOF { err = io.ErrUnexpectedEOF } + return +} + +func decNakedReadRawBytes(dr decDriver, d *Decoder, n *decNaked, rawToString bool) { + if rawToString { + n.v = valueTypeString + n.s = string(dr.DecodeBytes(d.b[:], true)) + } else { + n.v = valueTypeBytes + n.l = dr.DecodeBytes(nil, false) + } +} diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/doc.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/doc.go new file mode 100644 index 0000000000000..325c6e1ed9570 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/doc.go @@ -0,0 +1,239 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +/* +Package codec provides a High Performance, Feature-Rich Idiomatic +codec/encoding library for msgpack, json. + +Supported Serialization formats are: + + - msgpack: https://github.com/msgpack/msgpack + - json: http://json.org http://tools.ietf.org/html/rfc7159 + +For detailed usage information, read the primer at +http://ugorji.net/blog/go-codec-primer . + +The idiomatic Go support is as seen in other encoding packages in the +standard library (ie json, xml, gob, etc). + +Rich Feature Set includes: + + - Simple but extremely powerful and feature-rich API + - Excellent code coverage ( > 90% ) + - Very High Performance. + Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X. + - Lock-free (sans mutex) concurrency for scaling to 100's of cores + - In-place updates during decode, with option to zero value in maps and slices prior to decode + - Coerce types where appropriate + e.g. 
decode an int in the stream into a float, decode numbers from formatted strings, etc
+  - Corner Cases:
+    Overflows, nil maps/slices, nil values in streams are handled correctly
+  - Standard field renaming via tags
+  - Support for omitting empty fields during an encoding
+  - Encoding from any value and decoding into pointer to any value
+    (struct, slice, map, primitives, pointers, interface{}, etc)
+  - Extensions to support efficient encoding/decoding of any named types
+  - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
+  - Support IsZero() bool to determine if a value is a zero value.
+    Analogous to time.Time.IsZero() bool.
+  - Decoding without a schema (into an interface{}).
+    Includes Options to configure what specific map or slice type to use
+    when decoding an encoded list or map into a nil interface{}
+  - Mapping a non-interface type to an interface, so we can decode appropriately
+    into any interface type with a correctly configured non-interface value.
+  - Encode a struct as an array, and decode struct from an array in the data stream
+  - Option to encode struct keys as numbers (instead of strings)
+    (to support structured streams with fields encoded as numeric codes)
+  - Comprehensive support for anonymous fields
+  - Fast (no-reflection) encoding/decoding of common maps and slices
+  - Code-generation for faster performance.
+  - Support binary (e.g. messagepack) and text (e.g. json) formats
+  - Support indefinite-length formats to enable true streaming
+    (for formats which support it e.g. json)
+  - Support canonical encoding, where a value is ALWAYS encoded as the same sequence of bytes.
+    This mostly applies to maps, where iteration order is non-deterministic.
+  - NIL in data stream decoded as zero value
+  - Never silently skip data when decoding.
+    User decides whether to return an error or silently skip data when keys or indexes
+    in the data stream do not map to fields in the struct.
+  - Detect and error when encoding a cyclic reference (instead of stack overflow shutdown)
+  - Encode/Decode from/to chan types (for iterative streaming support)
+  - Drop-in replacement for encoding/json. `json:` key in struct tag supported.
+  - Provides an RPC Server and Client Codec for net/rpc communication protocol.
+  - Handle unique idiosyncrasies of codecs e.g.
+    - For messagepack, configure how ambiguities in handling raw bytes are resolved
+    - For messagepack, provide rpc server/client codec to support
+      msgpack-rpc protocol defined at:
+      https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
+
+## Extension Support
+
+Users can register a function to handle the encoding or decoding of their
+custom types.
+
+There are no restrictions on what the custom type can be. Some examples:
+
+```go
+
+    type BisSet []int
+    type BitSet64 uint64
+    type UUID string
+    type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
+    type GifImage struct { ... }
+
+```
+
+As an illustration, MyStructWithUnexportedFields would normally be encoded
+as an empty map because it has no exported fields, while UUID would be
+encoded as a string. However, with extension support, you can encode any of
+these however you like.
+
+## Custom Encoding and Decoding
+
+This package maintains symmetry in the encoding and decoding halves. We
+determine how to encode or decode by walking this decision tree:
+
+  - is type a codec.Selfer?
+  - is there an extension registered for the type?
+  - is format binary, and is type an encoding.BinaryMarshaler and BinaryUnmarshaler?
+  - is format specifically json, and is type an encoding/json.Marshaler and Unmarshaler?
+  - is format text-based, and is type an encoding.TextMarshaler and TextUnmarshaler?
+  - else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc
+
+This symmetry is important to reduce chances of issues happening because the
+encoding and decoding sides are out of sync e.g. decoded via very specific
+encoding.TextUnmarshaler but encoded via kind-specific generalized mode.
+
+Consequently, if a type only defines one-half of the symmetry (e.g. it
+implements UnmarshalJSON() but not MarshalJSON()), then that type doesn't
+satisfy the check and we will continue walking down the decision tree.
+
+## RPC
+
+RPC Client and Server Codecs are implemented, so the codecs can be used with
+the standard net/rpc package.
+
+## Usage
+
+The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent
+modification.
+
+The Encoder and Decoder are NOT safe for concurrent use.
+
+Consequently, the usage model is basically:
+
+  - Create and initialize the Handle before any use.
+    Once created, DO NOT modify it.
+  - Multiple Encoders or Decoders can now use the Handle concurrently.
+    They only read information off the Handle (never write).
+  - However, each Encoder or Decoder MUST NOT be used concurrently.
+  - To re-use an Encoder/Decoder, call Reset(...) on it first.
+    This allows you to reuse state maintained on the Encoder/Decoder.
+
+Sample usage model:
+
+```go
+
+    // create and configure Handle
+    var (
+        mh codec.MsgpackHandle
+    )
+
+    mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
+
+    // configure extensions
+    // e.g. for msgpack, define functions and enable Time support for tag 1
+    mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt)
+
+    // create and use decoder/encoder
+    var (
+        r io.Reader
+        w io.Writer
+        b []byte
+        h = &mh
+    )
+
+    dec = codec.NewDecoder(r, h)
+    dec = codec.NewDecoderBytes(b, h)
+    err = dec.Decode(&v)
+
+    enc = codec.NewEncoder(w, h)
+    enc = codec.NewEncoderBytes(&b, h)
+    err = enc.Encode(v)
+
+    // RPC Server
+    go func() {
+        for {
+            conn, err := listener.Accept()
+            rpcCodec := codec.GoRpc.ServerCodec(conn, h)
+            // OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
+            rpc.ServeCodec(rpcCodec)
+        }
+    }()
+
+    // RPC Communication (client side)
+    conn, err = net.Dial("tcp", "localhost:5555")
+    rpcCodec := codec.GoRpc.ClientCodec(conn, h)
+    // OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
+    client := rpc.NewClientWithCodec(rpcCodec)
+
+```
+
+## Running Tests
+
+To run tests, use the following:
+
+```
+
+    go test
+
+```
+
+To run the full suite of tests, use the following:
+
+```
+
+    go test -tags alltests -run Suite
+
+```
+
+You can use the tag 'safe' to run tests or build in safe mode, e.g.
+
+```
+
+    go test -tags safe -run Json
+    go test -tags "alltests safe" -run Suite
+
+```
+
+## Running Benchmarks
+
+```
+
+    cd codec/bench
+    ./bench.sh -d
+    ./bench.sh -c
+    ./bench.sh -s
+    go test -bench . -benchmem -benchtime 1s
+
+```
+
+Please see http://github.com/hashicorp/go-codec-bench.
+
+## Caveats
+
+Struct fields matching the following are ignored during encoding and
+decoding:
+
+  - struct tag value set to -
+  - func, complex numbers, unsafe pointers
+  - unexported and not embedded
+  - unexported and embedded and not struct kind
+  - unexported and embedded pointers
+
+Every other field in a struct will be encoded/decoded.
+
+Embedded fields are encoded as if they exist in the top-level struct, with
+some caveats. See Encode documentation.
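+
+As a brief illustration of the ignore rules above (hypothetical type, not
+part of this package), given:
+
+```go
+
+    type T struct {
+        A int                // encoded: exported
+        B string `codec:"-"` // ignored: struct tag value set to -
+        c bool               // ignored: unexported and not embedded
+        F func()             // ignored: func kind
+    }
+
+```
+
+only A takes part in encoding and decoding.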
+*/ +package codec diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/encode.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/encode.go new file mode 100644 index 0000000000000..29c723e13e5f6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/encode.go @@ -0,0 +1,1812 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "encoding" + "errors" + "fmt" + "io" + "reflect" + "runtime" + "sort" + "strconv" + "time" +) + +// defEncByteBufSize is the default size of []byte used +// for bufio buffer or []byte (when nil passed) +const defEncByteBufSize = 1 << 10 // 4:16, 6:64, 8:256, 10:1024 + +var errEncoderNotInitialized = errors.New("Encoder not initialized") + +/* + +// encWriter abstracts writing to a byte array or to an io.Writer. +// +// +// Deprecated: Use encWriterSwitch instead. +type encWriter interface { + writeb([]byte) + writestr(string) + writen1(byte) + writen2(byte, byte) + end() +} + +*/ + +// encDriver abstracts the actual codec (binc vs msgpack, etc) +type encDriver interface { + EncodeNil() + EncodeInt(i int64) + EncodeUint(i uint64) + EncodeBool(b bool) + EncodeFloat32(f float32) + EncodeFloat64(f float64) + // encodeExtPreamble(xtag byte, length int) + EncodeRawExt(re *RawExt, e *Encoder) + EncodeExt(v interface{}, xtag uint64, ext Ext, e *Encoder) + // Deprecated: use EncodeStringEnc instead + EncodeString(c charEncoding, v string) + // Deprecated: use EncodeStringBytesRaw instead + EncodeStringBytes(c charEncoding, v []byte) + EncodeStringEnc(c charEncoding, v string) // c cannot be cRAW + // EncodeSymbol(v string) + EncodeStringBytesRaw(v []byte) + EncodeTime(time.Time) + //encBignum(f *big.Int) + //encStringRunes(c charEncoding, v []rune) + WriteArrayStart(length int) + WriteArrayElem() + WriteArrayEnd() + WriteMapStart(length int) + WriteMapElemKey() + WriteMapElemValue() + WriteMapEnd() + + reset() + atEndOfEncode() +} + +type encDriverAsis interface { + EncodeAsis(v []byte) +} + +type encodeError struct { + codecError +} + +func (e encodeError) Error() string { + return fmt.Sprintf("%s encode error: %v", e.name, e.err) +} + +type encDriverNoopContainerWriter struct{} + +func (encDriverNoopContainerWriter) WriteArrayStart(length int) {} +func (encDriverNoopContainerWriter) WriteArrayElem() {} +func (encDriverNoopContainerWriter) WriteArrayEnd() {} +func (encDriverNoopContainerWriter) WriteMapStart(length int) {} +func (encDriverNoopContainerWriter) WriteMapElemKey() {} +func (encDriverNoopContainerWriter) WriteMapElemValue() {} +func (encDriverNoopContainerWriter) WriteMapEnd() {} +func (encDriverNoopContainerWriter) atEndOfEncode() {} + +type encDriverTrackContainerWriter struct { + c containerState +} + +func (e *encDriverTrackContainerWriter) WriteArrayStart(length int) { e.c = containerArrayStart } +func (e *encDriverTrackContainerWriter) WriteArrayElem() { e.c = containerArrayElem } +func (e *encDriverTrackContainerWriter) WriteArrayEnd() { e.c = containerArrayEnd } +func (e *encDriverTrackContainerWriter) WriteMapStart(length int) { e.c = containerMapStart } +func (e *encDriverTrackContainerWriter) WriteMapElemKey() { e.c = containerMapKey } +func (e *encDriverTrackContainerWriter) WriteMapElemValue() { e.c = containerMapValue } +func (e *encDriverTrackContainerWriter) WriteMapEnd() { e.c = containerMapEnd } +func (e *encDriverTrackContainerWriter) atEndOfEncode() {} + +// type ioEncWriterWriter interface { 
+// WriteByte(c byte) error
+// WriteString(s string) (n int, err error)
+// Write(p []byte) (n int, err error)
+// }
+
+// EncodeOptions captures configuration options during encode.
+type EncodeOptions struct {
+	// WriterBufferSize is the size of the buffer used when writing.
+	//
+	// If > 0, we use a smart buffer internally for performance purposes.
+	WriterBufferSize int
+
+	// ChanRecvTimeout is the timeout used when selecting from a chan.
+	//
+	// Configuring this controls how we receive from a chan during the encoding process.
+	//   - If ==0, we only consume the elements currently available in the chan.
+	//   - If <0, we consume until the chan is closed.
+	//   - If >0, we consume until this timeout.
+	ChanRecvTimeout time.Duration
+
+	// StructToArray specifies that a struct should be encoded as an array,
+	// and not as a map.
+	StructToArray bool
+
+	// Canonical representation means that encoding a value will always result in the same
+	// sequence of bytes.
+	//
+	// This only affects maps, as the iteration order for maps is random.
+	//
+	// The implementation MAY use the natural sort order for the map keys if possible:
+	//
+	//   - If there is a natural sort order (i.e. for number, bool, string or []byte keys),
+	//     then the map keys are first sorted in natural order and then written
+	//     with corresponding map values to the stream.
+	//   - If there is no natural sort order, then the map keys will first be
+	//     encoded into []byte, and then sorted,
+	//     before writing the sorted keys and the corresponding map values to the stream.
+	//
+	Canonical bool
+
+	// CheckCircularRef controls whether we check for circular references
+	// and error fast during an encode.
+	//
+	// If enabled, an error is returned if a pointer to a struct
+	// references itself either directly or through one of its fields (iteratively).
+	//
+	// This is opt-in, as there may be a performance hit to checking circular references.
+	CheckCircularRef bool
+
+	// RecursiveEmptyCheck controls whether we descend into interfaces, structs and pointers
+	// when checking if a value is empty.
+	//
+	// Note that this may make OmitEmpty more expensive, as it incurs a lot more reflect calls.
+	RecursiveEmptyCheck bool
+
+	// Raw controls whether we encode Raw values.
+	// This is a "dangerous" option and must be explicitly set.
+	// If set, we blindly encode Raw values as-is, without checking
+	// if they are a correct representation of a value in that format.
+	// If unset, we error out.
+	Raw bool
+
+	// StringToRaw controls how strings are encoded.
+	//
+	// As a go string is just an (immutable) sequence of bytes,
+	// it can be encoded either as raw bytes or as a UTF-8 string.
+	//
+	// By default, strings are encoded as UTF-8,
+	// but can be treated as []byte during an encode.
+	//
+	// Note that things which we know (by definition) to be UTF-8
+	// are ALWAYS encoded as UTF-8 strings.
+	// These include encoding.TextMarshaler, time.Format calls, struct field names, etc.
+	StringToRaw bool
+
+	// // AsSymbols defines what should be encoded as symbols.
+	// //
+	// // Encoding as symbols can reduce the encoded size significantly.
+	// //
+	// // However, during decoding, each string to be encoded as a symbol must
+	// // be checked to see if it has been seen before. Consequently, encoding time
+	// // will increase if using symbols, because string comparisons have a clear cost.
+ // // + // // Sample values: + // // AsSymbolNone + // // AsSymbolAll + // // AsSymbolMapStringKeys + // // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag + // AsSymbols AsSymbolFlag +} + +// --------------------------------------------- + +/* + +type ioEncStringWriter interface { + WriteString(s string) (n int, err error) +} + +// ioEncWriter implements encWriter and can write to an io.Writer implementation +type ioEncWriter struct { + w io.Writer + ww io.Writer + bw io.ByteWriter + sw ioEncStringWriter + fw ioFlusher + b [8]byte +} + +func (z *ioEncWriter) reset(w io.Writer) { + z.w = w + var ok bool + if z.bw, ok = w.(io.ByteWriter); !ok { + z.bw = z + } + if z.sw, ok = w.(ioEncStringWriter); !ok { + z.sw = z + } + z.fw, _ = w.(ioFlusher) + z.ww = w +} + +func (z *ioEncWriter) WriteByte(b byte) (err error) { + z.b[0] = b + _, err = z.w.Write(z.b[:1]) + return +} + +func (z *ioEncWriter) WriteString(s string) (n int, err error) { + return z.w.Write(bytesView(s)) +} + +func (z *ioEncWriter) writeb(bs []byte) { + if _, err := z.ww.Write(bs); err != nil { + panic(err) + } +} + +func (z *ioEncWriter) writestr(s string) { + if _, err := z.sw.WriteString(s); err != nil { + panic(err) + } +} + +func (z *ioEncWriter) writen1(b byte) { + if err := z.bw.WriteByte(b); err != nil { + panic(err) + } +} + +func (z *ioEncWriter) writen2(b1, b2 byte) { + var err error + if err = z.bw.WriteByte(b1); err == nil { + if err = z.bw.WriteByte(b2); err == nil { + return + } + } + panic(err) +} + +// func (z *ioEncWriter) writen5(b1, b2, b3, b4, b5 byte) { +// z.b[0], z.b[1], z.b[2], z.b[3], z.b[4] = b1, b2, b3, b4, b5 +// if _, err := z.ww.Write(z.b[:5]); err != nil { +// panic(err) +// } +// } + +//go:noinline - so *encWriterSwitch.XXX has the bytesEncAppender.XXX inlined +func (z *ioEncWriter) end() { + if z.fw != nil { + if err := z.fw.Flush(); err != nil { + panic(err) + } + } +} + +*/ + +// --------------------------------------------- + +// bufioEncWriter +type bufioEncWriter struct { + buf []byte + w io.Writer + n int + sz int // buf size + + // Extensions can call Encode() within a current Encode() call. + // We need to know when the top level Encode() call returns, + // so we can decide whether to Release() or not. + calls uint16 // what depth in mustDecode are we in now. 
+ + _ [6]uint8 // padding + + bytesBufPooler + + _ [1]uint64 // padding + // a int + // b [4]byte + // err +} + +func (z *bufioEncWriter) reset(w io.Writer, bufsize int) { + z.w = w + z.n = 0 + z.calls = 0 + if bufsize <= 0 { + bufsize = defEncByteBufSize + } + z.sz = bufsize + if cap(z.buf) >= bufsize { + z.buf = z.buf[:cap(z.buf)] + } else { + z.buf = z.bytesBufPooler.get(bufsize) + // z.buf = make([]byte, bufsize) + } +} + +func (z *bufioEncWriter) release() { + z.buf = nil + z.bytesBufPooler.end() +} + +//go:noinline - flush only called intermittently +func (z *bufioEncWriter) flushErr() (err error) { + n, err := z.w.Write(z.buf[:z.n]) + z.n -= n + if z.n > 0 && err == nil { + err = io.ErrShortWrite + } + if n > 0 && z.n > 0 { + copy(z.buf, z.buf[n:z.n+n]) + } + return err +} + +func (z *bufioEncWriter) flush() { + if err := z.flushErr(); err != nil { + panic(err) + } +} + +func (z *bufioEncWriter) writeb(s []byte) { +LOOP: + a := len(z.buf) - z.n + if len(s) > a { + z.n += copy(z.buf[z.n:], s[:a]) + s = s[a:] + z.flush() + goto LOOP + } + z.n += copy(z.buf[z.n:], s) +} + +func (z *bufioEncWriter) writestr(s string) { + // z.writeb(bytesView(s)) // inlined below +LOOP: + a := len(z.buf) - z.n + if len(s) > a { + z.n += copy(z.buf[z.n:], s[:a]) + s = s[a:] + z.flush() + goto LOOP + } + z.n += copy(z.buf[z.n:], s) +} + +func (z *bufioEncWriter) writen1(b1 byte) { + if 1 > len(z.buf)-z.n { + z.flush() + } + z.buf[z.n] = b1 + z.n++ +} + +func (z *bufioEncWriter) writen2(b1, b2 byte) { + if 2 > len(z.buf)-z.n { + z.flush() + } + z.buf[z.n+1] = b2 + z.buf[z.n] = b1 + z.n += 2 +} + +func (z *bufioEncWriter) endErr() (err error) { + if z.n > 0 { + err = z.flushErr() + } + return +} + +// --------------------------------------------- + +// bytesEncAppender implements encWriter and can write to an byte slice. +type bytesEncAppender struct { + b []byte + out *[]byte +} + +func (z *bytesEncAppender) writeb(s []byte) { + z.b = append(z.b, s...) +} +func (z *bytesEncAppender) writestr(s string) { + z.b = append(z.b, s...) 
+} +func (z *bytesEncAppender) writen1(b1 byte) { + z.b = append(z.b, b1) +} +func (z *bytesEncAppender) writen2(b1, b2 byte) { + z.b = append(z.b, b1, b2) +} +func (z *bytesEncAppender) endErr() error { + *(z.out) = z.b + return nil +} +func (z *bytesEncAppender) reset(in []byte, out *[]byte) { + z.b = in[:0] + z.out = out +} + +// --------------------------------------------- + +func (e *Encoder) rawExt(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeRawExt(rv2i(rv).(*RawExt), e) +} + +func (e *Encoder) ext(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeExt(rv2i(rv), f.xfTag, f.xfFn, e) +} + +func (e *Encoder) selferMarshal(f *codecFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecEncodeSelf(e) +} + +func (e *Encoder) binaryMarshal(f *codecFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.BinaryMarshaler).MarshalBinary() + e.marshalRaw(bs, fnerr) +} + +func (e *Encoder) textMarshal(f *codecFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.TextMarshaler).MarshalText() + e.marshalUtf8(bs, fnerr) +} + +func (e *Encoder) jsonMarshal(f *codecFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(jsonMarshaler).MarshalJSON() + e.marshalAsis(bs, fnerr) +} + +func (e *Encoder) raw(f *codecFnInfo, rv reflect.Value) { + e.rawBytes(rv2i(rv).(Raw)) +} + +func (e *Encoder) kInvalid(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeNil() +} + +func (e *Encoder) kErr(f *codecFnInfo, rv reflect.Value) { + e.errorf("unsupported kind %s, for %#v", rv.Kind(), rv) +} + +func (e *Encoder) kSlice(f *codecFnInfo, rv reflect.Value) { + ti := f.ti + ee := e.e + // array may be non-addressable, so we have to manage with care + // (don't call rv.Bytes, rv.Slice, etc). + // E.g. type struct S{B [2]byte}; + // Encode(S{}) will bomb on "panic: slice of unaddressable array". + if f.seq != seqTypeArray { + if rv.IsNil() { + ee.EncodeNil() + return + } + // If in this method, then there was no extension function defined. + // So it's okay to treat as []byte. + if ti.rtid == uint8SliceTypId { + ee.EncodeStringBytesRaw(rv.Bytes()) + return + } + } + if f.seq == seqTypeChan && ti.chandir&uint8(reflect.RecvDir) == 0 { + e.errorf("send-only channel cannot be encoded") + } + elemsep := e.esep + rtelem := ti.elem + rtelemIsByte := uint8TypId == rt2id(rtelem) // NOT rtelem.Kind() == reflect.Uint8 + var l int + // if a slice, array or chan of bytes, treat specially + if rtelemIsByte { + switch f.seq { + case seqTypeSlice: + ee.EncodeStringBytesRaw(rv.Bytes()) + case seqTypeArray: + l = rv.Len() + if rv.CanAddr() { + ee.EncodeStringBytesRaw(rv.Slice(0, l).Bytes()) + } else { + var bs []byte + if l <= cap(e.b) { + bs = e.b[:l] + } else { + bs = make([]byte, l) + } + reflect.Copy(reflect.ValueOf(bs), rv) + ee.EncodeStringBytesRaw(bs) + } + case seqTypeChan: + // do not use range, so that the number of elements encoded + // does not change, and encoding does not hang waiting on someone to close chan. + // for b := range rv2i(rv).(<-chan byte) { bs = append(bs, b) } + // ch := rv2i(rv).(<-chan byte) // fix error - that this is a chan byte, not a <-chan byte. 
+ + if rv.IsNil() { + ee.EncodeNil() + break + } + bs := e.b[:0] + irv := rv2i(rv) + ch, ok := irv.(<-chan byte) + if !ok { + ch = irv.(chan byte) + } + + L1: + switch timeout := e.h.ChanRecvTimeout; { + case timeout == 0: // only consume available + for { + select { + case b := <-ch: + bs = append(bs, b) + default: + break L1 + } + } + case timeout > 0: // consume until timeout + tt := time.NewTimer(timeout) + for { + select { + case b := <-ch: + bs = append(bs, b) + case <-tt.C: + // close(tt.C) + break L1 + } + } + default: // consume until close + for b := range ch { + bs = append(bs, b) + } + } + + ee.EncodeStringBytesRaw(bs) + } + return + } + + // if chan, consume chan into a slice, and work off that slice. + if f.seq == seqTypeChan { + rvcs := reflect.Zero(reflect.SliceOf(rtelem)) + timeout := e.h.ChanRecvTimeout + if timeout < 0 { // consume until close + for { + recv, recvOk := rv.Recv() + if !recvOk { + break + } + rvcs = reflect.Append(rvcs, recv) + } + } else { + cases := make([]reflect.SelectCase, 2) + cases[0] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: rv} + if timeout == 0 { + cases[1] = reflect.SelectCase{Dir: reflect.SelectDefault} + } else { + tt := time.NewTimer(timeout) + cases[1] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(tt.C)} + } + for { + chosen, recv, recvOk := reflect.Select(cases) + if chosen == 1 || !recvOk { + break + } + rvcs = reflect.Append(rvcs, recv) + } + } + rv = rvcs // TODO: ensure this doesn't mess up anywhere that rv of kind chan is expected + } + + l = rv.Len() + if ti.mbs { + if l%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", l) + return + } + ee.WriteMapStart(l / 2) + } else { + ee.WriteArrayStart(l) + } + + if l > 0 { + var fn *codecFn + for rtelem.Kind() == reflect.Ptr { + rtelem = rtelem.Elem() + } + // if kind is reflect.Interface, do not pre-determine the + // encoding type, because preEncodeValue may break it down to + // a concrete type and kInterface will bomb. 
+ if rtelem.Kind() != reflect.Interface { + fn = e.h.fn(rtelem, true, true) + } + for j := 0; j < l; j++ { + if elemsep { + if ti.mbs { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } else { + ee.WriteArrayElem() + } + } + e.encodeValue(rv.Index(j), fn, true) + } + } + + if ti.mbs { + ee.WriteMapEnd() + } else { + ee.WriteArrayEnd() + } +} + +func (e *Encoder) kStructNoOmitempty(f *codecFnInfo, rv reflect.Value) { + fti := f.ti + tisfi := fti.sfiSrc + toMap := !(fti.toArray || e.h.StructToArray) + if toMap { + tisfi = fti.sfiSort + } + + ee := e.e + + sfn := structFieldNode{v: rv, update: false} + if toMap { + ee.WriteMapStart(len(tisfi)) + if e.esep { + for _, si := range tisfi { + ee.WriteMapElemKey() + e.kStructFieldKey(fti.keyType, si.encNameAsciiAlphaNum, si.encName) + ee.WriteMapElemValue() + e.encodeValue(sfn.field(si), nil, true) + } + } else { + for _, si := range tisfi { + e.kStructFieldKey(fti.keyType, si.encNameAsciiAlphaNum, si.encName) + e.encodeValue(sfn.field(si), nil, true) + } + } + ee.WriteMapEnd() + } else { + ee.WriteArrayStart(len(tisfi)) + if e.esep { + for _, si := range tisfi { + ee.WriteArrayElem() + e.encodeValue(sfn.field(si), nil, true) + } + } else { + for _, si := range tisfi { + e.encodeValue(sfn.field(si), nil, true) + } + } + ee.WriteArrayEnd() + } +} + +func (e *Encoder) kStructFieldKey(keyType valueType, encNameAsciiAlphaNum bool, encName string) { + encStructFieldKey(encName, e.e, e.w, keyType, encNameAsciiAlphaNum, e.js) +} + +func (e *Encoder) kStruct(f *codecFnInfo, rv reflect.Value) { + fti := f.ti + elemsep := e.esep + tisfi := fti.sfiSrc + var newlen int + toMap := !(fti.toArray || e.h.StructToArray) + var mf map[string]interface{} + if f.ti.mf { + mf = rv2i(rv).(MissingFielder).CodecMissingFields() + toMap = true + newlen += len(mf) + } else if f.ti.mfp { + if rv.CanAddr() { + mf = rv2i(rv.Addr()).(MissingFielder).CodecMissingFields() + } else { + // make a new addressable value of same one, and use it + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + mf = rv2i(rv2).(MissingFielder).CodecMissingFields() + } + toMap = true + newlen += len(mf) + } + // if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct) + if toMap { + tisfi = fti.sfiSort + } + newlen += len(tisfi) + ee := e.e + + // Use sync.Pool to reduce allocating slices unnecessarily. + // The cost of sync.Pool is less than the cost of new allocation. + // + // Each element of the array pools one of encStructPool(8|16|32|64). + // It allows the re-use of slices up to 64 in length. + // A performance cost of encoding structs was collecting + // which values were empty and should be omitted. + // We needed slices of reflect.Value and string to collect them. + // This shared pool reduces the amount of unnecessary creation we do. + // The cost is that of locking sometimes, but sync.Pool is efficient + // enough to reduce thread contention. + + // fmt.Printf(">>>>>>>>>>>>>> encode.kStruct: newlen: %d\n", newlen) + var spool sfiRvPooler + var fkvs = spool.get(newlen) + + var kv sfiRv + recur := e.h.RecursiveEmptyCheck + sfn := structFieldNode{v: rv, update: false} + newlen = 0 + for _, si := range tisfi { + // kv.r = si.field(rv, false) + kv.r = sfn.field(si) + if toMap { + if si.omitEmpty() && isEmptyValue(kv.r, e.h.TypeInfos, recur, recur) { + continue + } + kv.v = si // si.encName + } else { + // use the zero value. 
+ // if a reference or struct, set to nil (so you do not output too much) + if si.omitEmpty() && isEmptyValue(kv.r, e.h.TypeInfos, recur, recur) { + switch kv.r.Kind() { + case reflect.Struct, reflect.Interface, reflect.Ptr, + reflect.Array, reflect.Map, reflect.Slice: + kv.r = reflect.Value{} //encode as nil + } + } + } + fkvs[newlen] = kv + newlen++ + } + fkvs = fkvs[:newlen] + + var mflen int + for k, v := range mf { + if k == "" { + delete(mf, k) + continue + } + if fti.infoFieldOmitempty && isEmptyValue(reflect.ValueOf(v), e.h.TypeInfos, recur, recur) { + delete(mf, k) + continue + } + mflen++ + } + + var j int + if toMap { + ee.WriteMapStart(newlen + mflen) + if elemsep { + for j = 0; j < len(fkvs); j++ { + kv = fkvs[j] + ee.WriteMapElemKey() + e.kStructFieldKey(fti.keyType, kv.v.encNameAsciiAlphaNum, kv.v.encName) + ee.WriteMapElemValue() + e.encodeValue(kv.r, nil, true) + } + } else { + for j = 0; j < len(fkvs); j++ { + kv = fkvs[j] + e.kStructFieldKey(fti.keyType, kv.v.encNameAsciiAlphaNum, kv.v.encName) + e.encodeValue(kv.r, nil, true) + } + } + // now, add the others + for k, v := range mf { + ee.WriteMapElemKey() + e.kStructFieldKey(fti.keyType, false, k) + ee.WriteMapElemValue() + e.encode(v) + } + ee.WriteMapEnd() + } else { + ee.WriteArrayStart(newlen) + if elemsep { + for j = 0; j < len(fkvs); j++ { + ee.WriteArrayElem() + e.encodeValue(fkvs[j].r, nil, true) + } + } else { + for j = 0; j < len(fkvs); j++ { + e.encodeValue(fkvs[j].r, nil, true) + } + } + ee.WriteArrayEnd() + } + + // do not use defer. Instead, use explicit pool return at end of function. + // defer has a cost we are trying to avoid. + // If there is a panic and these slices are not returned, it is ok. + spool.end() +} + +func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) { + ee := e.e + if rv.IsNil() { + ee.EncodeNil() + return + } + + l := rv.Len() + ee.WriteMapStart(l) + if l == 0 { + ee.WriteMapEnd() + return + } + // var asSymbols bool + // determine the underlying key and val encFn's for the map. + // This eliminates some work which is done for each loop iteration i.e. + // rv.Type(), ref.ValueOf(rt).Pointer(), then check map/list for fn. + // + // However, if kind is reflect.Interface, do not pre-determine the + // encoding type, because preEncodeValue may break it down to + // a concrete type and kInterface will bomb. 
+ var keyFn, valFn *codecFn + ti := f.ti + rtkey0 := ti.key + rtkey := rtkey0 + rtval0 := ti.elem + rtval := rtval0 + // rtkeyid := rt2id(rtkey0) + for rtval.Kind() == reflect.Ptr { + rtval = rtval.Elem() + } + if rtval.Kind() != reflect.Interface { + valFn = e.h.fn(rtval, true, true) + } + mks := rv.MapKeys() + + if e.h.Canonical { + e.kMapCanonical(rtkey, rv, mks, valFn) + ee.WriteMapEnd() + return + } + + var keyTypeIsString = stringTypId == rt2id(rtkey0) // rtkeyid + if !keyTypeIsString { + for rtkey.Kind() == reflect.Ptr { + rtkey = rtkey.Elem() + } + if rtkey.Kind() != reflect.Interface { + // rtkeyid = rt2id(rtkey) + keyFn = e.h.fn(rtkey, true, true) + } + } + + // for j, lmks := 0, len(mks); j < lmks; j++ { + for j := range mks { + if e.esep { + ee.WriteMapElemKey() + } + if keyTypeIsString { + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(mks[j].String())) + } else { + ee.EncodeStringEnc(cUTF8, mks[j].String()) + } + } else { + e.encodeValue(mks[j], keyFn, true) + } + if e.esep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mks[j]), valFn, true) + + } + ee.WriteMapEnd() +} + +func (e *Encoder) kMapCanonical(rtkey reflect.Type, rv reflect.Value, mks []reflect.Value, valFn *codecFn) { + ee := e.e + elemsep := e.esep + // we previously did out-of-band if an extension was registered. + // This is not necessary, as the natural kind is sufficient for ordering. + + switch rtkey.Kind() { + case reflect.Bool: + mksv := make([]boolRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Bool() + } + sort.Sort(boolRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeBool(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.String: + mksv := make([]stringRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.String() + } + sort.Sort(stringRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(mksv[i].v)) + } else { + ee.EncodeStringEnc(cUTF8, mksv[i].v) + } + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: + mksv := make([]uintRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Uint() + } + sort.Sort(uintRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeUint(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + mksv := make([]intRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Int() + } + sort.Sort(intRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeInt(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Float32: + mksv := make([]floatRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + sort.Sort(floatRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(mksv[i].v)) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Float64: + mksv := make([]floatRv, len(mks)) + for i, k := range mks { + v 
:= &mksv[i] + v.r = k + v.v = k.Float() + } + sort.Sort(floatRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Struct: + if rv.Type() == timeTyp { + mksv := make([]timeRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = rv2i(k).(time.Time) + } + sort.Sort(timeRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeTime(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + break + } + fallthrough + default: + // out-of-band + // first encode each key to a []byte first, then sort them, then record + var mksv []byte = make([]byte, 0, len(mks)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + mksbv := make([]bytesRv, len(mks)) + for i, k := range mks { + v := &mksbv[i] + l := len(mksv) + e2.MustEncode(k) + v.r = k + v.v = mksv[l:] + } + sort.Sort(bytesRvSlice(mksbv)) + for j := range mksbv { + if elemsep { + ee.WriteMapElemKey() + } + e.asis(mksbv[j].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksbv[j].r), valFn, true) + } + } +} + +// // -------------------------------------------------- + +type encWriterSwitch struct { + // wi *ioEncWriter + wb bytesEncAppender + wf *bufioEncWriter + // typ entryType + bytes bool // encoding to []byte + esep bool // whether it has elem separators + isas bool // whether e.as != nil + js bool // is json encoder? + be bool // is binary encoder? + _ [2]byte // padding + // _ [2]uint64 // padding + // _ uint64 // padding +} + +func (z *encWriterSwitch) writeb(s []byte) { + if z.bytes { + z.wb.writeb(s) + } else { + z.wf.writeb(s) + } +} +func (z *encWriterSwitch) writestr(s string) { + if z.bytes { + z.wb.writestr(s) + } else { + z.wf.writestr(s) + } +} +func (z *encWriterSwitch) writen1(b1 byte) { + if z.bytes { + z.wb.writen1(b1) + } else { + z.wf.writen1(b1) + } +} +func (z *encWriterSwitch) writen2(b1, b2 byte) { + if z.bytes { + z.wb.writen2(b1, b2) + } else { + z.wf.writen2(b1, b2) + } +} +func (z *encWriterSwitch) endErr() error { + if z.bytes { + return z.wb.endErr() + } + return z.wf.endErr() +} + +func (z *encWriterSwitch) end() { + if err := z.endErr(); err != nil { + panic(err) + } +} + +/* + +// ------------------------------------------ +func (z *encWriterSwitch) writeb(s []byte) { + switch z.typ { + case entryTypeBytes: + z.wb.writeb(s) + case entryTypeIo: + z.wi.writeb(s) + default: + z.wf.writeb(s) + } +} +func (z *encWriterSwitch) writestr(s string) { + switch z.typ { + case entryTypeBytes: + z.wb.writestr(s) + case entryTypeIo: + z.wi.writestr(s) + default: + z.wf.writestr(s) + } +} +func (z *encWriterSwitch) writen1(b1 byte) { + switch z.typ { + case entryTypeBytes: + z.wb.writen1(b1) + case entryTypeIo: + z.wi.writen1(b1) + default: + z.wf.writen1(b1) + } +} +func (z *encWriterSwitch) writen2(b1, b2 byte) { + switch z.typ { + case entryTypeBytes: + z.wb.writen2(b1, b2) + case entryTypeIo: + z.wi.writen2(b1, b2) + default: + z.wf.writen2(b1, b2) + } +} +func (z *encWriterSwitch) end() { + switch z.typ { + case entryTypeBytes: + z.wb.end() + case entryTypeIo: + z.wi.end() + default: + z.wf.end() + } +} + +// ------------------------------------------ +func (z *encWriterSwitch) writeb(s []byte) { + if z.bytes { + z.wb.writeb(s) + } else { + z.wi.writeb(s) + } +} +func (z *encWriterSwitch) 
writestr(s string) {
+	if z.bytes {
+		z.wb.writestr(s)
+	} else {
+		z.wi.writestr(s)
+	}
+}
+func (z *encWriterSwitch) writen1(b1 byte) {
+	if z.bytes {
+		z.wb.writen1(b1)
+	} else {
+		z.wi.writen1(b1)
+	}
+}
+func (z *encWriterSwitch) writen2(b1, b2 byte) {
+	if z.bytes {
+		z.wb.writen2(b1, b2)
+	} else {
+		z.wi.writen2(b1, b2)
+	}
+}
+func (z *encWriterSwitch) end() {
+	if z.bytes {
+		z.wb.end()
+	} else {
+		z.wi.end()
+	}
+}
+
+*/
+
+// Encoder writes an object to an output stream in a supported format.
+//
+// Encoder is NOT safe for concurrent use, i.e. an Encoder cannot be used
+// concurrently in multiple goroutines.
+//
+// However, as Encoder could be allocation heavy to initialize, a Reset method is provided
+// so its state can be reused to encode new output streams repeatedly.
+// This is the idiomatic way to use it.
+type Encoder struct {
+	panicHdl
+	// hopefully, reduce dereferencing cost by laying the encWriter inside the Encoder
+	e encDriver
+
+	// NOTE: Encoder shouldn't call its write methods,
+	// as the handler MAY need to do some coordination.
+	w *encWriterSwitch
+
+	// bw *bufio.Writer
+	as encDriverAsis
+
+	err error
+
+	h  *BasicHandle
+	hh Handle
+	// ---- cpu cache line boundary? + 3
+	encWriterSwitch
+
+	ci set
+
+	b [(5 * 8)]byte // for encoding chan or (non-addressable) [N]byte
+
+	// ---- writable fields during execution --- *try* to keep in sep cache line
+
+	// ---- cpu cache line boundary?
+	// b [scratchByteArrayLen]byte
+	// _ [cacheLineSize - scratchByteArrayLen]byte // padding
+	// b [cacheLineSize - (8 * 0)]byte // used for encoding a chan or (non-addressable) array of bytes
+}
+
+// NewEncoder returns an Encoder for encoding into an io.Writer.
+//
+// For efficiency, users are encouraged to configure WriterBufferSize on the handle
+// OR pass in a memory buffered writer (e.g. bufio.Writer, bytes.Buffer).
+func NewEncoder(w io.Writer, h Handle) *Encoder {
+	e := newEncoder(h)
+	e.Reset(w)
+	return e
+}
+
+// NewEncoderBytes returns an encoder for encoding directly and efficiently
+// into a byte slice, using zero-copying to temporary slices.
+//
+// It will potentially replace the output byte slice pointed to.
+// After encoding, the out parameter contains the encoded contents.
+func NewEncoderBytes(out *[]byte, h Handle) *Encoder {
+	e := newEncoder(h)
+	e.ResetBytes(out)
+	return e
+}
+
+func newEncoder(h Handle) *Encoder {
+	e := &Encoder{h: basicHandle(h), err: errEncoderNotInitialized}
+	e.bytes = true
+	if useFinalizers {
+		runtime.SetFinalizer(e, (*Encoder).finalize)
+		// xdebugf(">>>> new(Encoder) with finalizer")
+	}
+	e.w = &e.encWriterSwitch
+	e.hh = h
+	e.esep = h.hasElemSeparators()
+
+	return e
+}
+
+func (e *Encoder) resetCommon() {
+	// e.w = &e.encWriterSwitch
+	if e.e == nil || e.hh.recreateEncDriver(e.e) {
+		e.e = e.hh.newEncDriver(e)
+		e.as, e.isas = e.e.(encDriverAsis)
+		// e.cr, _ = e.e.(containerStateRecv)
+	}
+	e.be = e.hh.isBinary()
+	_, e.js = e.hh.(*JsonHandle)
+	e.e.reset()
+	e.err = nil
+}
+
+// Reset resets the Encoder with a new output stream.
+//
+// This accommodates using the state of the Encoder,
+// where it has "cached" information about sub-engines.
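+//
+// An illustrative reuse pattern follows (a sketch only; w, w2, h, v1 and v2
+// are assumed to be supplied by the caller):
+//
+//	enc := NewEncoder(w, h)
+//	err := enc.Encode(v1) // first output stream
+//	// ... handle err ...
+//	enc.Reset(w2)         // reuse the Encoder's cached sub-engine state
+//	err = enc.Encode(v2)  // second stream, without re-initialization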
+func (e *Encoder) Reset(w io.Writer) {
+	if w == nil {
+		return
+	}
+	// var ok bool
+	e.bytes = false
+	if e.wf == nil {
+		e.wf = new(bufioEncWriter)
+	}
+	// e.typ = entryTypeUnset
+	// if e.h.WriterBufferSize > 0 {
+	// 	// bw := bufio.NewWriterSize(w, e.h.WriterBufferSize)
+	// 	// e.wi.bw = bw
+	// 	// e.wi.sw = bw
+	// 	// e.wi.fw = bw
+	// 	// e.wi.ww = bw
+	// 	if e.wf == nil {
+	// 		e.wf = new(bufioEncWriter)
+	// 	}
+	// 	e.wf.reset(w, e.h.WriterBufferSize)
+	// 	e.typ = entryTypeBufio
+	// } else {
+	// 	if e.wi == nil {
+	// 		e.wi = new(ioEncWriter)
+	// 	}
+	// 	e.wi.reset(w)
+	// 	e.typ = entryTypeIo
+	// }
+	e.wf.reset(w, e.h.WriterBufferSize)
+	// e.typ = entryTypeBufio
+
+	// e.w = e.wi
+	e.resetCommon()
+}
+
+// ResetBytes resets the Encoder with a new destination output []byte.
+func (e *Encoder) ResetBytes(out *[]byte) {
+	if out == nil {
+		return
+	}
+	var in []byte = *out
+	if in == nil {
+		in = make([]byte, defEncByteBufSize)
+	}
+	e.bytes = true
+	// e.typ = entryTypeBytes
+	e.wb.reset(in, out)
+	// e.w = &e.wb
+	e.resetCommon()
+}
+
+// Encode writes an object into a stream.
+//
+// Encoding can be configured via the struct tag for the fields.
+// The key (in the struct tags) that we look at is configurable.
+//
+// By default, we look up the "codec" key in the struct field's tags,
+// and fall back to the "json" key if "codec" is absent.
+// The key in the struct field's tag value is the key name,
+// followed by an optional comma and options.
+//
+// To set an option on all fields (e.g. omitempty on all fields), you
+// can create a field called _struct, and set flags on it. The options
+// which can be set on _struct are:
+//   - omitempty: so all fields are omitted if empty
+//   - toarray: so struct is encoded as an array
+//   - int: so struct key names are encoded as signed integers (instead of strings)
+//   - uint: so struct key names are encoded as unsigned integers (instead of strings)
+//   - float: so struct key names are encoded as floats (instead of strings)
+//
+// More details on these below.
+//
+// Struct values "usually" encode as maps. Each exported struct field is encoded unless:
+//   - the field's tag is "-", OR
+//   - the field is empty (empty or the zero value) and its tag specifies the "omitempty" option.
+//
+// When encoding as a map, the first string in the tag (before the comma)
+// is the map key string to use when encoding.
+// ...
+// This key is typically encoded as a string.
+// However, there are instances where the encoded stream has mapping keys encoded as numbers.
+// For example, some cbor streams have keys as integer codes in the stream, but they should map
+// to fields in a structured object. Consequently, a struct is the natural representation in code.
+// For these, configure the struct to encode/decode the keys as numbers (instead of string).
+// This is done with the int, uint or float option on the _struct field (see above).
+//
+// However, struct values may encode as arrays. This happens when:
+//   - StructToArray Encode option is set, OR
+//   - the tag on the _struct field sets the "toarray" option
+//
+// Note that omitempty is ignored when encoding struct values as arrays,
+// as an entry must be encoded for each field, to maintain its position.
+//
+// Values with types that implement MapBySlice are encoded as stream maps.
+//
+// The empty values (for omitempty option) are false, 0, any nil pointer
+// or interface value, and any array, slice, map, or string of length zero.
+// +// Anonymous fields are encoded inline except: +// - the struct tag specifies a replacement name (first value) +// - the field is of an interface type +// +// Examples: +// +// // NOTE: 'json:' can be used as struct tag key, in place 'codec:' below. +// type MyStruct struct { +// _struct bool `codec:",omitempty"` //set omitempty for every field +// Field1 string `codec:"-"` //skip this field +// Field2 int `codec:"myName"` //Use key "myName" in encode stream +// Field3 int32 `codec:",omitempty"` //use key "Field3". Omit if empty. +// Field4 bool `codec:"f4,omitempty"` //use key "f4". Omit if empty. +// io.Reader //use key "Reader". +// MyStruct `codec:"my1" //use key "my1". +// MyStruct //inline it +// ... +// } +// +// type MyStruct struct { +// _struct bool `codec:",toarray"` //encode struct as an array +// } +// +// type MyStruct struct { +// _struct bool `codec:",uint"` //encode struct with "unsigned integer" keys +// Field1 string `codec:"1"` //encode Field1 key using: EncodeInt(1) +// Field2 string `codec:"2"` //encode Field2 key using: EncodeInt(2) +// } +// +// The mode of encoding is based on the type of the value. When a value is seen: +// - If a Selfer, call its CodecEncodeSelf method +// - If an extension is registered for it, call that extension function +// - If implements encoding.(Binary|Text|JSON)Marshaler, call Marshal(Binary|Text|JSON) method +// - Else encode it based on its reflect.Kind +// +// Note that struct field names and keys in map[string]XXX will be treated as symbols. +// Some formats support symbols (e.g. binc) and will properly encode the string +// only once in the stream, and use a tag to refer to it thereafter. +func (e *Encoder) Encode(v interface{}) (err error) { + // tried to use closure, as runtime optimizes defer with no params. + // This seemed to be causing weird issues (like circular reference found, unexpected panic, etc). + // Also, see https://github.com/golang/go/issues/14939#issuecomment-417836139 + // defer func() { e.deferred(&err) }() } + // { x, y := e, &err; defer func() { x.deferred(y) }() } + if e.err != nil { + return e.err + } + if recoverPanicToErr { + defer func() { + // if error occurred during encoding, return that error; + // else if error occurred on end'ing (i.e. during flush), return that error. + err = e.w.endErr() + x := recover() + if x == nil { + e.err = err + } else { + panicValToErr(e, x, &e.err) + err = e.err + } + }() + } + + // defer e.deferred(&err) + e.mustEncode(v) + return +} + +// MustEncode is like Encode, but panics if unable to Encode. +// This provides insight to the code location that triggered the error. +func (e *Encoder) MustEncode(v interface{}) { + if e.err != nil { + panic(e.err) + } + e.mustEncode(v) +} + +func (e *Encoder) mustEncode(v interface{}) { + if e.wf == nil { + e.encode(v) + e.e.atEndOfEncode() + e.w.end() + return + } + + if e.wf.buf == nil { + e.wf.buf = e.wf.bytesBufPooler.get(e.wf.sz) + } + e.wf.calls++ + + e.encode(v) + + e.wf.calls-- + + if e.wf.calls == 0 { + e.e.atEndOfEncode() + e.w.end() + if !e.h.ExplicitRelease { + e.wf.release() + } + } +} + +// func (e *Encoder) deferred(err1 *error) { +// e.w.end() +// if recoverPanicToErr { +// if x := recover(); x != nil { +// panicValToErr(e, x, err1) +// panicValToErr(e, x, &e.err) +// } +// } +// } + +//go:noinline -- as it is run by finalizer +func (e *Encoder) finalize() { + // xdebugf("finalizing Encoder") + e.Release() +} + +// Release releases shared (pooled) resources. 
+// +// It is important to call Release() when done with an Encoder, so those resources +// are released instantly for use by subsequently created Encoders. +func (e *Encoder) Release() { + if e.wf != nil { + e.wf.release() + } +} + +func (e *Encoder) encode(iv interface{}) { + // a switch with only concrete types can be optimized. + // consequently, we deal with nil and interfaces outside the switch. + + if iv == nil || definitelyNil(iv) { + e.e.EncodeNil() + return + } + + switch v := iv.(type) { + // case nil: + // case Selfer: + case Raw: + e.rawBytes(v) + case reflect.Value: + e.encodeValue(v, nil, true) + + case string: + if e.h.StringToRaw { + e.e.EncodeStringBytesRaw(bytesView(v)) + } else { + e.e.EncodeStringEnc(cUTF8, v) + } + case bool: + e.e.EncodeBool(v) + case int: + e.e.EncodeInt(int64(v)) + case int8: + e.e.EncodeInt(int64(v)) + case int16: + e.e.EncodeInt(int64(v)) + case int32: + e.e.EncodeInt(int64(v)) + case int64: + e.e.EncodeInt(v) + case uint: + e.e.EncodeUint(uint64(v)) + case uint8: + e.e.EncodeUint(uint64(v)) + case uint16: + e.e.EncodeUint(uint64(v)) + case uint32: + e.e.EncodeUint(uint64(v)) + case uint64: + e.e.EncodeUint(v) + case uintptr: + e.e.EncodeUint(uint64(v)) + case float32: + e.e.EncodeFloat32(v) + case float64: + e.e.EncodeFloat64(v) + case time.Time: + e.e.EncodeTime(v) + case []uint8: + e.e.EncodeStringBytesRaw(v) + + case *Raw: + e.rawBytes(*v) + + case *string: + if e.h.StringToRaw { + e.e.EncodeStringBytesRaw(bytesView(*v)) + } else { + e.e.EncodeStringEnc(cUTF8, *v) + } + case *bool: + e.e.EncodeBool(*v) + case *int: + e.e.EncodeInt(int64(*v)) + case *int8: + e.e.EncodeInt(int64(*v)) + case *int16: + e.e.EncodeInt(int64(*v)) + case *int32: + e.e.EncodeInt(int64(*v)) + case *int64: + e.e.EncodeInt(*v) + case *uint: + e.e.EncodeUint(uint64(*v)) + case *uint8: + e.e.EncodeUint(uint64(*v)) + case *uint16: + e.e.EncodeUint(uint64(*v)) + case *uint32: + e.e.EncodeUint(uint64(*v)) + case *uint64: + e.e.EncodeUint(*v) + case *uintptr: + e.e.EncodeUint(uint64(*v)) + case *float32: + e.e.EncodeFloat32(*v) + case *float64: + e.e.EncodeFloat64(*v) + case *time.Time: + e.e.EncodeTime(*v) + + case *[]uint8: + e.e.EncodeStringBytesRaw(*v) + + default: + if v, ok := iv.(Selfer); ok { + v.CodecEncodeSelf(e) + } else if !fastpathEncodeTypeSwitch(iv, e) { + // checkfastpath=true (not false), as underlying slice/map type may be fast-path + e.encodeValue(reflect.ValueOf(iv), nil, true) + } + } +} + +func (e *Encoder) encodeValue(rv reflect.Value, fn *codecFn, checkFastpath bool) { + // if a valid fn is passed, it MUST BE for the dereferenced type of rv + var sptr uintptr + var rvp reflect.Value + var rvpValid bool +TOP: + switch rv.Kind() { + case reflect.Ptr: + if rv.IsNil() { + e.e.EncodeNil() + return + } + rvpValid = true + rvp = rv + rv = rv.Elem() + if e.h.CheckCircularRef && rv.Kind() == reflect.Struct { + // TODO: Movable pointers will be an issue here. Future problem. 
+ sptr = rv.UnsafeAddr() + break TOP + } + goto TOP + case reflect.Interface: + if rv.IsNil() { + e.e.EncodeNil() + return + } + rv = rv.Elem() + goto TOP + case reflect.Slice, reflect.Map: + if rv.IsNil() { + e.e.EncodeNil() + return + } + case reflect.Invalid, reflect.Func: + e.e.EncodeNil() + return + } + + if sptr != 0 && (&e.ci).add(sptr) { + e.errorf("circular reference found: # %d", sptr) + } + + if fn == nil { + rt := rv.Type() + // always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer + fn = e.h.fn(rt, checkFastpath, true) + } + if fn.i.addrE { + if rvpValid { + fn.fe(e, &fn.i, rvp) + } else if rv.CanAddr() { + fn.fe(e, &fn.i, rv.Addr()) + } else { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + fn.fe(e, &fn.i, rv2) + } + } else { + fn.fe(e, &fn.i, rv) + } + if sptr != 0 { + (&e.ci).remove(sptr) + } +} + +// func (e *Encoder) marshal(bs []byte, fnerr error, asis bool, c charEncoding) { +// if fnerr != nil { +// panic(fnerr) +// } +// if bs == nil { +// e.e.EncodeNil() +// } else if asis { +// e.asis(bs) +// } else { +// e.e.EncodeStringBytesRaw(bs) +// } +// } + +func (e *Encoder) marshalUtf8(bs []byte, fnerr error) { + if fnerr != nil { + panic(fnerr) + } + if bs == nil { + e.e.EncodeNil() + } else { + e.e.EncodeStringEnc(cUTF8, stringView(bs)) + } +} + +func (e *Encoder) marshalAsis(bs []byte, fnerr error) { + if fnerr != nil { + panic(fnerr) + } + if bs == nil { + e.e.EncodeNil() + } else { + e.asis(bs) + } +} + +func (e *Encoder) marshalRaw(bs []byte, fnerr error) { + if fnerr != nil { + panic(fnerr) + } + if bs == nil { + e.e.EncodeNil() + } else { + e.e.EncodeStringBytesRaw(bs) + } +} + +func (e *Encoder) asis(v []byte) { + if e.isas { + e.as.EncodeAsis(v) + } else { + e.w.writeb(v) + } +} + +func (e *Encoder) rawBytes(vv Raw) { + v := []byte(vv) + if !e.h.Raw { + e.errorf("Raw values cannot be encoded: %v", v) + } + e.asis(v) +} + +func (e *Encoder) wrapErr(v interface{}, err *error) { + *err = encodeError{codecError{name: e.hh.Name(), err: v}} +} + +func encStructFieldKey(encName string, ee encDriver, w *encWriterSwitch, + keyType valueType, encNameAsciiAlphaNum bool, js bool) { + var m must + // use if-else-if, not switch (which compiles to binary-search) + // since keyType is typically valueTypeString, branch prediction is pretty good. 
+ if keyType == valueTypeString { + if js && encNameAsciiAlphaNum { // keyType == valueTypeString + // w.writen1('"') + // w.writestr(encName) + // w.writen1('"') + // ---- + // w.writestr(`"` + encName + `"`) + // ---- + // do concat myself, so it is faster than the generic string concat + b := make([]byte, len(encName)+2) + copy(b[1:], encName) + b[0] = '"' + b[len(b)-1] = '"' + w.writeb(b) + } else { // keyType == valueTypeString + ee.EncodeStringEnc(cUTF8, encName) + } + } else if keyType == valueTypeInt { + ee.EncodeInt(m.Int(strconv.ParseInt(encName, 10, 64))) + } else if keyType == valueTypeUint { + ee.EncodeUint(m.Uint(strconv.ParseUint(encName, 10, 64))) + } else if keyType == valueTypeFloat { + ee.EncodeFloat64(m.Float(strconv.ParseFloat(encName, 64))) + } +} + +// func encStringAsRawBytesMaybe(ee encDriver, s string, stringToRaw bool) { +// if stringToRaw { +// ee.EncodeStringBytesRaw(bytesView(s)) +// } else { +// ee.EncodeStringEnc(cUTF8, s) +// } +// } diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/fast-path.not.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/fast-path.not.go new file mode 100644 index 0000000000000..93cb754a0377c --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/fast-path.not.go @@ -0,0 +1,39 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import "reflect" + +// fastpath was removed for safety reasons + +const fastpathEnabled = false + +func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false } +func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false } +func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false } +func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false } +func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { return false } + +type fastpathT struct{} +type fastpathE struct { + rtid uintptr + rt reflect.Type + encfn func(*Encoder, *codecFnInfo, reflect.Value) + decfn func(*Decoder, *codecFnInfo, reflect.Value) +} +type fastpathA [0]fastpathE + +func (x fastpathA) index(rtid uintptr) int { return -1 } + +func (_ fastpathT) DecSliceUint8V(v []uint8, canChange bool, d *Decoder) (_ []uint8, changed bool) { + fn := d.h.fn(uint8SliceTyp, true, true) + d.kSlice(&fn.i, reflect.ValueOf(&v).Elem()) + return v, true +} + +var fastpathAV fastpathA +var fastpathTV fastpathT + +// ---- +// type TestMammoth2Wrapper struct{} diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-dec-array.go.tmpl b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-dec-array.go.tmpl new file mode 100644 index 0000000000000..790e914e13cb3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-dec-array.go.tmpl @@ -0,0 +1,78 @@ +{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} +{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} +var {{var "c"}} bool {{/* // changed */}} +_ = {{var "c"}}{{end}} +if {{var "l"}} == 0 { + {{if isSlice }}if {{var "v"}} == nil { + {{var "v"}} = []{{ .Typ }}{} + {{var "c"}} = true + } else if len({{var "v"}}) != 0 { + {{var "v"}} = {{var "v"}}[:0] + {{var "c"}} = true + } {{else if isChan }}if {{var "v"}} == nil { + {{var "v"}} = make({{ .CTyp }}, 0) + {{var "c"}} = true + } {{end}} +} else { + {{var "hl"}} := {{var "l"}} > 0 + var {{var "rl"}} int + _ = {{var "rl"}} + {{if isSlice }} if {{var "hl"}} { + if 
{{var "l"}} > cap({{var "v"}}) { + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + if {{var "rl"}} <= cap({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "rl"}}] + } else { + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + } + {{var "c"}} = true + } else if {{var "l"}} != len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "l"}}] + {{var "c"}} = true + } + } {{end}} + var {{var "j"}} int + // var {{var "dn"}} bool + for {{var "j"}} = 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { // bounds-check-elimination + {{if not isArray}} if {{var "j"}} == 0 && {{var "v"}} == nil { + if {{var "hl"}} { + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + } else { + {{var "rl"}} = {{if isSlice}}8{{else if isChan}}64{{end}} + } + {{var "v"}} = make({{if isSlice}}[]{{ .Typ }}{{else if isChan}}{{.CTyp}}{{end}}, {{var "rl"}}) + {{var "c"}} = true + }{{end}} + {{var "h"}}.ElemContainerState({{var "j"}}) + {{/* {{var "dn"}} = r.TryDecodeAsNil() */}}{{/* commented out, as decLineVar handles this already each time */}} + {{if isChan}}{{ $x := printf "%[1]vvcx%[2]v" .TempVar .Rand }}var {{$x}} {{ .Typ }} + {{ decLineVar $x }} + {{var "v"}} <- {{ $x }} + // println(">>>> sending ", {{ $x }}, " into ", {{var "v"}}) // TODO: remove this + {{else}}{{/* // if indefinite, etc, then expand the slice if necessary */}} + var {{var "db"}} bool + if {{var "j"}} >= len({{var "v"}}) { + {{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }}) + {{var "c"}} = true + {{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true + {{end}} + } + if {{var "db"}} { + z.DecSwallow() + } else { + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } + {{end}} + } + {{if isSlice}} if {{var "j"}} < len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "j"}}] + {{var "c"}} = true + } else if {{var "j"}} == 0 && {{var "v"}} == nil { + {{var "v"}} = make([]{{ .Typ }}, 0) + {{var "c"}} = true + } {{end}} +} +{{var "h"}}.End() +{{if not isArray }}if {{var "c"}} { + *{{ .Varname }} = {{var "v"}} +}{{end}} diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-dec-map.go.tmpl b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-dec-map.go.tmpl new file mode 100644 index 0000000000000..8323b54940d04 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-dec-map.go.tmpl @@ -0,0 +1,42 @@ +{{var "v"}} := *{{ .Varname }} +{{var "l"}} := r.ReadMapStart() +{{var "bh"}} := z.DecBasicHandle() +if {{var "v"}} == nil { + {{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) + {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) + *{{ .Varname }} = {{var "v"}} +} +var {{var "mk"}} {{ .KTyp }} +var {{var "mv"}} {{ .Typ }} +var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool +if {{var "bh"}}.MapValueReset { + {{if decElemKindPtr}}{{var "mg"}} = true + {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } + {{else if not decElemKindImmutable}}{{var "mg"}} = true + {{end}} } +if {{var "l"}} != 0 { +{{var "hl"}} := {{var "l"}} > 0 + for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { + r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}} + {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} +{{ if eq .KTyp "interface{}" }}{{/* 
// special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { + {{var "mk"}} = string({{var "bv"}}) + }{{ end }}{{if decElemKindPtr}} + {{var "ms"}} = true{{end}} + if {{var "mg"}} { + {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] + if {{var "mok"}} { + {{var "ms"}} = false + } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} + } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} + r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}} + {{var "mdn"}} = false + {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }} + if {{var "mdn"}} { + if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} } + } else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { + {{var "v"}}[{{var "mk"}}] = {{var "mv"}} + } +} +} // else len==0: TODO: Should we clear map entries? +r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}} diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-enc-chan.go.tmpl b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-enc-chan.go.tmpl new file mode 100644 index 0000000000000..4249588a3cf8d --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-enc-chan.go.tmpl @@ -0,0 +1,27 @@ +{{.Label}}: +switch timeout{{.Sfx}} := z.EncBasicHandle().ChanRecvTimeout; { +case timeout{{.Sfx}} == 0: // only consume available + for { + select { + case b{{.Sfx}} := <-{{.Chan}}: + {{ .Slice }} = append({{.Slice}}, b{{.Sfx}}) + default: + break {{.Label}} + } + } +case timeout{{.Sfx}} > 0: // consume until timeout + tt{{.Sfx}} := time.NewTimer(timeout{{.Sfx}}) + for { + select { + case b{{.Sfx}} := <-{{.Chan}}: + {{.Slice}} = append({{.Slice}}, b{{.Sfx}}) + case <-tt{{.Sfx}}.C: + // close(tt.C) + break {{.Label}} + } + } +default: // consume until close + for b{{.Sfx}} := range {{.Chan}} { + {{.Slice}} = append({{.Slice}}, b{{.Sfx}}) + } +} diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-helper.generated.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-helper.generated.go new file mode 100644 index 0000000000000..2a7d1aab70b3a --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-helper.generated.go @@ -0,0 +1,343 @@ +// comment this out // + build ignore + +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// Code generated from gen-helper.go.tmpl - DO NOT EDIT. + +package codec + +import ( + "encoding" + "reflect" +) + +// GenVersion is the current version of codecgen. +const GenVersion = 10 + +// This file is used to generate helper code for codecgen. +// The values here i.e. genHelper(En|De)coder are not to be used directly by +// library users. They WILL change continuously and without notice. +// +// To help enforce this, we create an unexported type with exported members. +// The only way to get the type is via the one exported type that we control (somewhat). +// +// When static codecs are created for types, they will use this value +// to perform encoding or decoding of primitives or known slice or map types. + +// GenHelperEncoder is exported so that it can be used externally by codecgen. +// +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. 
+func GenHelperEncoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) { + ge = genHelperEncoder{e: e} + ee = genHelperEncDriver{encDriver: e.e} + return +} + +// GenHelperDecoder is exported so that it can be used externally by codecgen. +// +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. +func GenHelperDecoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) { + gd = genHelperDecoder{d: d} + dd = genHelperDecDriver{decDriver: d.d} + return +} + +type genHelperEncDriver struct { + encDriver +} + +func (x genHelperEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {} +func (x genHelperEncDriver) EncStructFieldKey(keyType valueType, s string) { + encStructFieldKey(s, x.encDriver, nil, keyType, false, false) +} +func (x genHelperEncDriver) EncodeSymbol(s string) { + x.encDriver.EncodeStringEnc(cUTF8, s) +} + +type genHelperDecDriver struct { + decDriver + C checkOverflow +} + +func (x genHelperDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {} +func (x genHelperDecDriver) DecStructFieldKey(keyType valueType, buf *[decScratchByteArrayLen]byte) []byte { + return decStructFieldKey(x.decDriver, keyType, buf) +} +func (x genHelperDecDriver) DecodeInt(bitsize uint8) (i int64) { + return x.C.IntV(x.decDriver.DecodeInt64(), bitsize) +} +func (x genHelperDecDriver) DecodeUint(bitsize uint8) (ui uint64) { + return x.C.UintV(x.decDriver.DecodeUint64(), bitsize) +} +func (x genHelperDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + f = x.DecodeFloat64() + if chkOverflow32 && chkOvf.Float32(f) { + panicv.errorf("float32 overflow: %v", f) + } + return +} +func (x genHelperDecDriver) DecodeFloat32As64() (f float64) { + f = x.DecodeFloat64() + if chkOvf.Float32(f) { + panicv.errorf("float32 overflow: %v", f) + } + return +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +type genHelperEncoder struct { + M must + e *Encoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +type genHelperDecoder struct { + C checkOverflow + d *Decoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBasicHandle() *BasicHandle { + return f.e.h +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinary() bool { + return f.e.be // f.e.hh.isBinaryEncoding() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) IsJSONHandle() bool { + return f.e.js +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncFallback(iv interface{}) { + // println(">>>>>>>>> EncFallback") + // f.e.encodeI(iv, false, false) + f.e.encodeValue(reflect.ValueOf(iv), nil, false) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { + bs, fnerr := iv.MarshalText() + f.e.marshalUtf8(bs, fnerr) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { + bs, fnerr := iv.MarshalJSON() + f.e.marshalAsis(bs, fnerr) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { + bs, fnerr := iv.MarshalBinary() + f.e.marshalRaw(bs, fnerr) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* +func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: builtin no longer supported - so we make this method a no-op, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperEncoder) TimeRtidIfBinc() (v uintptr) { return } + +// func (f genHelperEncoder) TimeRtidIfBinc() uintptr { +// if _, ok := f.e.hh.(*BincHandle); ok { +// return timeTypId +// } +// } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) I2Rtid(v interface{}) uintptr { + return i2rtid(v) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) Extension(rtid uintptr) (xfn *extTypeTagFn) { + return f.e.h.getExt(rtid) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) { + f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) WriteStr(s string) { + f.e.w.writestr(s) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) BytesView(v string) []byte { return bytesView(v) } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperEncoder) HasExtensions() bool { + return len(f.e.h.extHandle) != 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperEncoder) EncExt(v interface{}) (r bool) { + if xfFn := f.e.h.getExt(i2rtid(v)); xfFn != nil { + f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) + return true + } + return false +} + +// ---------------- DECODER FOLLOWS ----------------- + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBasicHandle() *BasicHandle { + return f.d.h +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinary() bool { + return f.d.be // f.d.hh.isBinaryEncoding() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSwallow() { f.d.swallow() } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecScratchBuffer() []byte { + return f.d.b[:] +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte { + return &f.d.b +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { + // println(">>>>>>>>> DecFallback") + rv := reflect.ValueOf(iv) + if chkPtr { + rv = f.d.ensureDecodeable(rv) + } + f.d.decodeValue(rv, nil, false) + // f.d.decodeValueFallback(rv) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { + return f.d.decSliceHelperStart() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* +func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { + f.d.structFieldNotFound(index, name) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { + f.d.arrayCannotExpand(sliceLen, streamLen) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { + fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { + // bs := f.dd.DecodeStringAsBytes() + // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. + fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { + fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true)) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: builtin no longer supported - so we make this method a no-op, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) TimeRtidIfBinc() (v uintptr) { return } + +// func (f genHelperDecoder) TimeRtidIfBinc() uintptr { +// // Note: builtin is no longer supported - so make this a no-op +// if _, ok := f.d.hh.(*BincHandle); ok { +// return timeTypId +// } +// return 0 +// } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) IsJSONHandle() bool { + return f.d.js +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) I2Rtid(v interface{}) uintptr { + return i2rtid(v) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) Extension(rtid uintptr) (xfn *extTypeTagFn) { + return f.d.h.getExt(rtid) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) { + f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) HasExtensions() bool { + return len(f.d.h.extHandle) != 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) DecExt(v interface{}) (r bool) { + if xfFn := f.d.h.getExt(i2rtid(v)); xfFn != nil { + f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) + return true + } + return false +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) { + return decInferLen(clen, maxlen, unit) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE*
+//
+// Deprecated: no longer used,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperDecoder) StringView(v []byte) string { return stringView(v) }
diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-helper.go.tmpl b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-helper.go.tmpl
new file mode 100644
index 0000000000000..f5d0634e6a1c4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-helper.go.tmpl
@@ -0,0 +1,308 @@
+// comment this out // + build ignore
+
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// Code generated from gen-helper.go.tmpl - DO NOT EDIT.
+
+package codec
+
+import (
+ "encoding"
+ "reflect"
+)
+
+// GenVersion is the current version of codecgen.
+const GenVersion = {{ .Version }}
+
+// This file is used to generate helper code for codecgen.
+// The values here i.e. genHelper(En|De)coder are not to be used directly by
+// library users. They WILL change continuously and without notice.
+//
+// To help enforce this, we create an unexported type with exported members.
+// The only way to get the type is via the one exported type that we control (somewhat).
+//
+// When static codecs are created for types, they will use this value
+// to perform encoding or decoding of primitives or known slice or map types.
+
+// GenHelperEncoder is exported so that it can be used externally by codecgen.
+//
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperEncoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) {
+ ge = genHelperEncoder{e: e}
+ ee = genHelperEncDriver{encDriver: e.e}
+ return
+}
+
+// GenHelperDecoder is exported so that it can be used externally by codecgen.
+//
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperDecoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) {
+ gd = genHelperDecoder{d: d}
+ dd = genHelperDecDriver{decDriver: d.d}
+ return
+}
+
+type genHelperEncDriver struct {
+ encDriver
+}
+
+func (x genHelperEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {}
+func (x genHelperEncDriver) EncStructFieldKey(keyType valueType, s string) {
+ encStructFieldKey(s, x.encDriver, nil, keyType, false, false)
+}
+func (x genHelperEncDriver) EncodeSymbol(s string) {
+ x.encDriver.EncodeStringEnc(cUTF8, s)
+}
+
+type genHelperDecDriver struct {
+ decDriver
+ C checkOverflow
+}
+
+func (x genHelperDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {}
+func (x genHelperDecDriver) DecStructFieldKey(keyType valueType, buf *[decScratchByteArrayLen]byte) []byte {
+ return decStructFieldKey(x.decDriver, keyType, buf)
+}
+func (x genHelperDecDriver) DecodeInt(bitsize uint8) (i int64) {
+ return x.C.IntV(x.decDriver.DecodeInt64(), bitsize)
+}
+func (x genHelperDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
+ return x.C.UintV(x.decDriver.DecodeUint64(), bitsize)
+}
+func (x genHelperDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
+ f = x.DecodeFloat64()
+ if chkOverflow32 && chkOvf.Float32(f) {
+ panicv.errorf("float32 overflow: %v", f)
+ }
+ return
+}
+func (x genHelperDecDriver) DecodeFloat32As64() (f float64) {
+ f = x.DecodeFloat64()
+ if chkOvf.Float32(f) {
+ panicv.errorf("float32 overflow: %v", f)
+ }
+ return
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* +type genHelperEncoder struct { + M must + e *Encoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +type genHelperDecoder struct { + C checkOverflow + d *Decoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBasicHandle() *BasicHandle { + return f.e.h +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinary() bool { + return f.e.be // f.e.hh.isBinaryEncoding() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) IsJSONHandle() bool { + return f.e.js +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncFallback(iv interface{}) { + // println(">>>>>>>>> EncFallback") + // f.e.encodeI(iv, false, false) + f.e.encodeValue(reflect.ValueOf(iv), nil, false) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { + bs, fnerr := iv.MarshalText() + f.e.marshalUtf8(bs, fnerr) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { + bs, fnerr := iv.MarshalJSON() + f.e.marshalAsis(bs, fnerr) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { + bs, fnerr := iv.MarshalBinary() + f.e.marshalRaw(bs, fnerr) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) } +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: builtin no longer supported - so we make this method a no-op, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperEncoder) TimeRtidIfBinc() (v uintptr) { return } +// func (f genHelperEncoder) TimeRtidIfBinc() uintptr { +// if _, ok := f.e.hh.(*BincHandle); ok { +// return timeTypId +// } +// } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) I2Rtid(v interface{}) uintptr { + return i2rtid(v) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) Extension(rtid uintptr) (xfn *extTypeTagFn) { + return f.e.h.getExt(rtid) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) { + f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) WriteStr(s string) { + f.e.w.writestr(s) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) BytesView(v string) []byte { return bytesView(v) } +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperEncoder) HasExtensions() bool { + return len(f.e.h.extHandle) != 0 +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. 
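+//
+// For orientation, an illustrative sketch (hypothetical type T; the exact shape varies
+// by codecgen version): generated Selfer methods acquire these helpers once per method,
+// via the pattern that genRequiredMethodVars in gen.go (later in this diff) emits:
+//
+//	func (x *T) CodecEncodeSelf(e *codec1978.Encoder) {
+//		var h codecSelfer1978 // per-file helper type; the numeric suffix varies
+//		z, r := codec1978.GenHelperEncoder(e)
+//		_, _, _ = h, z, r
+//		// ... per-field encoding through z (genHelperEncoder) and r (genHelperEncDriver) ...
+//	}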
+func (f genHelperEncoder) EncExt(v interface{}) (r bool) { + if xfFn := f.e.h.getExt(i2rtid(v)); xfFn != nil { + f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) + return true + } + return false +} + +// ---------------- DECODER FOLLOWS ----------------- + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBasicHandle() *BasicHandle { + return f.d.h +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinary() bool { + return f.d.be // f.d.hh.isBinaryEncoding() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSwallow() { f.d.swallow() } +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecScratchBuffer() []byte { + return f.d.b[:] +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte { + return &f.d.b +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { + // println(">>>>>>>>> DecFallback") + rv := reflect.ValueOf(iv) + if chkPtr { + rv = f.d.ensureDecodeable(rv) + } + f.d.decodeValue(rv, nil, false) + // f.d.decodeValueFallback(rv) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { + return f.d.decSliceHelperStart() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { + f.d.structFieldNotFound(index, name) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { + f.d.arrayCannotExpand(sliceLen, streamLen) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { + fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes()) + if fnerr != nil { + panic(fnerr) + } +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { + // bs := f.dd.DecodeStringAsBytes() + // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. + fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) + if fnerr != nil { + panic(fnerr) + } +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { + fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true)) + if fnerr != nil { + panic(fnerr) + } +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() } +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: builtin no longer supported - so we make this method a no-op, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) TimeRtidIfBinc() (v uintptr) { return } +// func (f genHelperDecoder) TimeRtidIfBinc() uintptr { +// // Note: builtin is no longer supported - so make this a no-op +// if _, ok := f.d.hh.(*BincHandle); ok { +// return timeTypId +// } +// return 0 +// } + +// FOR USE BY CODECGEN ONLY. 
IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) IsJSONHandle() bool { + return f.d.js +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) I2Rtid(v interface{}) uintptr { + return i2rtid(v) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) Extension(rtid uintptr) (xfn *extTypeTagFn) { + return f.d.h.getExt(rtid) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) { + f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) HasExtensions() bool { + return len(f.d.h.extHandle) != 0 +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) DecExt(v interface{}) (r bool) { + if xfFn := f.d.h.getExt(i2rtid(v)); xfFn != nil { + f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) + return true + } + return false +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) { + return decInferLen(clen, maxlen, unit) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: no longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) StringView(v []byte) string { return stringView(v) } + diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-internal.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-internal.go new file mode 100644 index 0000000000000..d3c51a537b408 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-internal.go @@ -0,0 +1,284 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
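+//
+// For orientation, an illustrative sketch (expected values, derived from the definitions
+// below): this file drives the template expansion that produces the fast-path code, and
+// the helpers below map a primitive type name to the statement spliced into a template:
+//
+//	genInternalEncCommandAsString("float64", "v") // -> `ee.EncodeFloat64(v)`
+//	genInternalDecCommandAsString("uint16")       // -> `uint16(chkOvf.UintV(dd.DecodeUint64(), 16))`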
+ +package codec + +import ( + "bytes" + "errors" + "go/format" + "io" + "io/ioutil" + "strings" + "sync" + "text/template" +) + +const genVersion = 10 + +func genInternalEncCommandAsString(s string, vname string) string { + switch s { + case "uint", "uint8", "uint16", "uint32", "uint64": + return "ee.EncodeUint(uint64(" + vname + "))" + case "int", "int8", "int16", "int32", "int64": + return "ee.EncodeInt(int64(" + vname + "))" + case "string": + return "if e.h.StringToRaw { ee.EncodeStringBytesRaw(bytesView(" + vname + ")) " + + "} else { ee.EncodeStringEnc(cUTF8, " + vname + ") }" + case "float32": + return "ee.EncodeFloat32(" + vname + ")" + case "float64": + return "ee.EncodeFloat64(" + vname + ")" + case "bool": + return "ee.EncodeBool(" + vname + ")" + // case "symbol": + // return "ee.EncodeSymbol(" + vname + ")" + default: + return "e.encode(" + vname + ")" + } +} + +func genInternalDecCommandAsString(s string) string { + switch s { + case "uint": + return "uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))" + case "uint8": + return "uint8(chkOvf.UintV(dd.DecodeUint64(), 8))" + case "uint16": + return "uint16(chkOvf.UintV(dd.DecodeUint64(), 16))" + case "uint32": + return "uint32(chkOvf.UintV(dd.DecodeUint64(), 32))" + case "uint64": + return "dd.DecodeUint64()" + case "uintptr": + return "uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))" + case "int": + return "int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))" + case "int8": + return "int8(chkOvf.IntV(dd.DecodeInt64(), 8))" + case "int16": + return "int16(chkOvf.IntV(dd.DecodeInt64(), 16))" + case "int32": + return "int32(chkOvf.IntV(dd.DecodeInt64(), 32))" + case "int64": + return "dd.DecodeInt64()" + + case "string": + return "dd.DecodeString()" + case "float32": + return "float32(chkOvf.Float32V(dd.DecodeFloat64()))" + case "float64": + return "dd.DecodeFloat64()" + case "bool": + return "dd.DecodeBool()" + default: + panic(errors.New("gen internal: unknown type for decode: " + s)) + } +} + +func genInternalZeroValue(s string) string { + switch s { + case "interface{}", "interface {}": + return "nil" + case "bool": + return "false" + case "string": + return `""` + default: + return "0" + } +} + +var genInternalNonZeroValueIdx [5]uint64 +var genInternalNonZeroValueStrs = [2][5]string{ + {`"string-is-an-interface"`, "true", `"some-string"`, "11.1", "33"}, + {`"string-is-an-interface-2"`, "true", `"some-string-2"`, "22.2", "44"}, +} + +func genInternalNonZeroValue(s string) string { + switch s { + case "interface{}", "interface {}": + genInternalNonZeroValueIdx[0]++ + return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[0]%2][0] // return string, to remove ambiguity + case "bool": + genInternalNonZeroValueIdx[1]++ + return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[1]%2][1] + case "string": + genInternalNonZeroValueIdx[2]++ + return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[2]%2][2] + case "float32", "float64", "float", "double": + genInternalNonZeroValueIdx[3]++ + return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[3]%2][3] + default: + genInternalNonZeroValueIdx[4]++ + return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[4]%2][4] + } +} + +func genInternalSortType(s string, elem bool) string { + for _, v := range [...]string{"int", "uint", "float", "bool", "string"} { + if strings.HasPrefix(s, v) { + if elem { + if v == "int" || v == "uint" || v == "float" { + return v + "64" + } else { + return v + } + } + return v + "Slice" + } + } + panic("sorttype: unexpected type: " + 
s) +} + +type genV struct { + // genV is either a primitive (Primitive != "") or a map (MapKey != "") or a slice + MapKey string + Elem string + Primitive string + Size int +} + +type genInternal struct { + Version int + Values []genV +} + +func (x genInternal) FastpathLen() (l int) { + for _, v := range x.Values { + if v.Primitive == "" && !(v.MapKey == "" && v.Elem == "uint8") { + l++ + } + } + return +} + +// var genInternalMu sync.Mutex +var genInternalV = genInternal{Version: genVersion} +var genInternalTmplFuncs template.FuncMap +var genInternalOnce sync.Once + +func genInternalInit() { + types := [...]string{ + "interface{}", + "string", + "float32", + "float64", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintptr", + "int", + "int8", + "int16", + "int32", + "int64", + "bool", + } + // keep as slice, so it is in specific iteration order. + // Initial order was uint64, string, interface{}, int, int64 + mapvaltypes := [...]string{ + "interface{}", + "string", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintptr", + "int", + "int8", + "int16", + "int32", + "int64", + "float32", + "float64", + "bool", + } + wordSizeBytes := int(intBitsize) / 8 + + mapvaltypes2 := map[string]int{ + "interface{}": 2 * wordSizeBytes, + "string": 2 * wordSizeBytes, + "uint": 1 * wordSizeBytes, + "uint8": 1, + "uint16": 2, + "uint32": 4, + "uint64": 8, + "uintptr": 1 * wordSizeBytes, + "int": 1 * wordSizeBytes, + "int8": 1, + "int16": 2, + "int32": 4, + "int64": 8, + "float32": 4, + "float64": 8, + "bool": 1, + } + var gt = genInternal{Version: genVersion} + + // For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function + for _, s := range types { + gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]}) + // if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already. + // gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]}) + // } + gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]}) + if _, ok := mapvaltypes2[s]; !ok { + gt.Values = append(gt.Values, genV{MapKey: s, Elem: s, Size: 2 * mapvaltypes2[s]}) + } + for _, ms := range mapvaltypes { + gt.Values = append(gt.Values, genV{MapKey: s, Elem: ms, Size: mapvaltypes2[s] + mapvaltypes2[ms]}) + } + } + + funcs := make(template.FuncMap) + // funcs["haspfx"] = strings.HasPrefix + funcs["encmd"] = genInternalEncCommandAsString + funcs["decmd"] = genInternalDecCommandAsString + funcs["zerocmd"] = genInternalZeroValue + funcs["nonzerocmd"] = genInternalNonZeroValue + funcs["hasprefix"] = strings.HasPrefix + funcs["sorttype"] = genInternalSortType + + genInternalV = gt + genInternalTmplFuncs = funcs +} + +// genInternalGoFile is used to generate source files from templates. +// It is run by the program author alone. +// Unfortunately, it has to be exported so that it can be called from a command line tool. +// *** DO NOT USE *** +func genInternalGoFile(r io.Reader, w io.Writer) (err error) { + genInternalOnce.Do(genInternalInit) + + gt := genInternalV + + t := template.New("").Funcs(genInternalTmplFuncs) + + tmplstr, err := ioutil.ReadAll(r) + if err != nil { + return + } + + if t, err = t.Parse(string(tmplstr)); err != nil { + return + } + + var out bytes.Buffer + err = t.Execute(&out, gt) + if err != nil { + return + } + + bout, err := format.Source(out.Bytes()) + if err != nil { + w.Write(out.Bytes()) // write out if error, so we can still see. 
+ // w.Write(bout) // write out if error, as much as possible, so we can still see. + return + } + w.Write(bout) + return +} diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen.generated.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen.generated.go new file mode 100644 index 0000000000000..2178efd5b7189 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen.generated.go @@ -0,0 +1,165 @@ +//go:build codecgen.exec +// +build codecgen.exec + +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl + +const genDecMapTmpl = ` +{{var "v"}} := *{{ .Varname }} +{{var "l"}} := r.ReadMapStart() +{{var "bh"}} := z.DecBasicHandle() +if {{var "v"}} == nil { + {{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) + {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) + *{{ .Varname }} = {{var "v"}} +} +var {{var "mk"}} {{ .KTyp }} +var {{var "mv"}} {{ .Typ }} +var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool +if {{var "bh"}}.MapValueReset { + {{if decElemKindPtr}}{{var "mg"}} = true + {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } + {{else if not decElemKindImmutable}}{{var "mg"}} = true + {{end}} } +if {{var "l"}} != 0 { +{{var "hl"}} := {{var "l"}} > 0 + for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { + r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}} + {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} +{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { + {{var "mk"}} = string({{var "bv"}}) + }{{ end }}{{if decElemKindPtr}} + {{var "ms"}} = true{{end}} + if {{var "mg"}} { + {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] + if {{var "mok"}} { + {{var "ms"}} = false + } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} + } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} + r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}} + {{var "mdn"}} = false + {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }} + if {{var "mdn"}} { + if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} } + } else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { + {{var "v"}}[{{var "mk"}}] = {{var "mv"}} + } +} +} // else len==0: TODO: Should we clear map entries? 
+r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}} +` + +const genDecListTmpl = ` +{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} +{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} +var {{var "c"}} bool {{/* // changed */}} +_ = {{var "c"}}{{end}} +if {{var "l"}} == 0 { + {{if isSlice }}if {{var "v"}} == nil { + {{var "v"}} = []{{ .Typ }}{} + {{var "c"}} = true + } else if len({{var "v"}}) != 0 { + {{var "v"}} = {{var "v"}}[:0] + {{var "c"}} = true + } {{else if isChan }}if {{var "v"}} == nil { + {{var "v"}} = make({{ .CTyp }}, 0) + {{var "c"}} = true + } {{end}} +} else { + {{var "hl"}} := {{var "l"}} > 0 + var {{var "rl"}} int + _ = {{var "rl"}} + {{if isSlice }} if {{var "hl"}} { + if {{var "l"}} > cap({{var "v"}}) { + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + if {{var "rl"}} <= cap({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "rl"}}] + } else { + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + } + {{var "c"}} = true + } else if {{var "l"}} != len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "l"}}] + {{var "c"}} = true + } + } {{end}} + var {{var "j"}} int + // var {{var "dn"}} bool + for {{var "j"}} = 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { // bounds-check-elimination + {{if not isArray}} if {{var "j"}} == 0 && {{var "v"}} == nil { + if {{var "hl"}} { + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + } else { + {{var "rl"}} = {{if isSlice}}8{{else if isChan}}64{{end}} + } + {{var "v"}} = make({{if isSlice}}[]{{ .Typ }}{{else if isChan}}{{.CTyp}}{{end}}, {{var "rl"}}) + {{var "c"}} = true + }{{end}} + {{var "h"}}.ElemContainerState({{var "j"}}) + {{/* {{var "dn"}} = r.TryDecodeAsNil() */}}{{/* commented out, as decLineVar handles this already each time */}} + {{if isChan}}{{ $x := printf "%[1]vvcx%[2]v" .TempVar .Rand }}var {{$x}} {{ .Typ }} + {{ decLineVar $x }} + {{var "v"}} <- {{ $x }} + // println(">>>> sending ", {{ $x }}, " into ", {{var "v"}}) // TODO: remove this + {{else}}{{/* // if indefinite, etc, then expand the slice if necessary */}} + var {{var "db"}} bool + if {{var "j"}} >= len({{var "v"}}) { + {{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }}) + {{var "c"}} = true + {{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true + {{end}} + } + if {{var "db"}} { + z.DecSwallow() + } else { + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } + {{end}} + } + {{if isSlice}} if {{var "j"}} < len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "j"}}] + {{var "c"}} = true + } else if {{var "j"}} == 0 && {{var "v"}} == nil { + {{var "v"}} = make([]{{ .Typ }}, 0) + {{var "c"}} = true + } {{end}} +} +{{var "h"}}.End() +{{if not isArray }}if {{var "c"}} { + *{{ .Varname }} = {{var "v"}} +}{{end}} +` + +const genEncChanTmpl = ` +{{.Label}}: +switch timeout{{.Sfx}} := z.EncBasicHandle().ChanRecvTimeout; { +case timeout{{.Sfx}} == 0: // only consume available + for { + select { + case b{{.Sfx}} := <-{{.Chan}}: + {{ .Slice }} = append({{.Slice}}, b{{.Sfx}}) + default: + break {{.Label}} + } + } +case timeout{{.Sfx}} > 0: // consume until timeout + tt{{.Sfx}} := time.NewTimer(timeout{{.Sfx}}) + for { + select { + case b{{.Sfx}} := <-{{.Chan}}: + {{.Slice}} = append({{.Slice}}, b{{.Sfx}}) + case <-tt{{.Sfx}}.C: + // close(tt.C) + break {{.Label}} + } + } +default: // 
consume until close
+ for b{{.Sfx}} := range {{.Chan}} {
+ {{.Slice}} = append({{.Slice}}, b{{.Sfx}})
+ }
+}
+`
diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen.go
new file mode 100644
index 0000000000000..92e631cad6ecb
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen.go
@@ -0,0 +1,1875 @@
+//go:build codecgen.exec
+// +build codecgen.exec
+
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "encoding/base32"
+ "errors"
+ "fmt"
+ "io"
+ "math/rand"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "text/template"
+ "time"
+ "unicode"
+ "unicode/utf8"
+)
+
+// ---------------------------------------------------
+// codecgen supports the full cycle of reflection-based codec:
+// - RawExt
+// - Raw
+// - Extensions
+// - (Binary|Text|JSON)(Unm|M)arshal
+// - generic by-kind
+//
+// This means that, for dynamic things, we MUST use reflection to at least get the reflect.Type.
+// In those areas, we try to only do reflection or interface-conversion when NECESSARY:
+// - Extensions, only if Extensions are configured.
+//
+// However, codecgen doesn't support the following:
+// - Canonical option. (codecgen IGNORES it currently)
+// This is just because it has not been implemented.
+// - MissingFielder implementation.
+// If a type implements MissingFielder, it is completely ignored by codecgen.
+//
+// During encode/decode, Selfer takes precedence.
+// A type implementing Selfer will know how to encode/decode itself statically.
+//
+// The following field types are supported:
+//
+// array: [n]T
+// slice: []T
+// map: map[K]V
+// primitive: [u]int[n], float(32|64), bool, string
+// struct
+//
+// ---------------------------------------------------
+// Note that a Selfer cannot call (e|d).(En|De)code on itself,
+// as this will cause a circular reference, as (En|De)code will call Selfer methods.
+// Any type that implements Selfer must implement completely and not fall back to (En|De)code.
+//
+// In addition, code in this file manages the generation of fast-path implementations of
+// encode/decode of slices/maps of primitive keys/values.
+//
+// Users MUST re-generate their implementations whenever the code shape changes.
+// The generated code will panic if it was generated with a version older than the supporting library.
+// ---------------------------------------------------
+//
+// The codec framework is very feature-rich.
+// When encoding or decoding into an interface, it depends on the runtime type of the interface.
+// The type of the interface may be a named type, an extension, etc.
+// Consequently, we fall back to runtime codec for encoding/decoding interfaces.
+// In addition, we fall back for any value which cannot be guaranteed at runtime.
+// This allows us to support ANY value, including any named types, specifically those which
+// do not implement our interfaces (e.g. Selfer).
+//
+// This explains some slowness compared to other code generation codecs (e.g. msgp).
+// This reduction in speed is only seen when your code refers to interfaces,
+// e.g. type T struct { A interface{}; B []interface{}; C map[string]interface{} }
+//
+// codecgen will panic if the file was generated with an old version of the library in use.
+//
+// Note:
+//
+// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil. 
+// This way, there isn't a function call overhead just to see that we should not enter a block of code. +// +// GenVersion is the current version of codecgen. +// +// NOTE: Increment this value each time codecgen changes fundamentally. +// Fundamental changes are: +// - helper methods change (signature change, new ones added, some removed, etc) +// - codecgen command line changes +// +// v1: Initial Version +// v2: +// v3: Changes for Kubernetes: +// +// changes in signature of some unpublished helper methods and codecgen cmdline arguments. +// +// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen) +// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections. +// v6: removed unsafe from gen, and now uses codecgen.exec tag +// v7: +// v8: current - we now maintain compatibility with old generated code. +// v9: skipped +// v10: modified encDriver and decDriver interfaces. Remove deprecated methods after Jan 1, 2019 +const ( + genCodecPkg = "codec1978" + genTempVarPfx = "yy" + genTopLevelVarName = "x" + + // ignore canBeNil parameter, and always set to true. + // This is because nil can appear anywhere, so we should always check. + genAnythingCanBeNil = true + + // if genUseOneFunctionForDecStructMap, make a single codecDecodeSelferFromMap function; + // else make codecDecodeSelferFromMap{LenPrefix,CheckBreak} so that conditionals + // are not executed a lot. + // + // From testing, it didn't make much difference in runtime, so keep as true (one function only) + genUseOneFunctionForDecStructMap = true +) + +type genStructMapStyle uint8 + +const ( + genStructMapStyleConsolidated genStructMapStyle = iota + genStructMapStyleLenPrefix + genStructMapStyleCheckBreak +) + +var ( + errGenAllTypesSamePkg = errors.New("All types must be in the same package") + errGenExpectArrayOrMap = errors.New("unexpected type. Expecting array/map/slice") + + // base64 requires 64 unique characters in Go 1.22+, which is not possible for Go identifiers. + genBase32enc = base32.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef") + genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`) +) + +type genBuf struct { + buf []byte +} + +func (x *genBuf) s(s string) *genBuf { x.buf = append(x.buf, s...); return x } +func (x *genBuf) b(s []byte) *genBuf { x.buf = append(x.buf, s...); return x } +func (x *genBuf) v() string { return string(x.buf) } +func (x *genBuf) f(s string, args ...interface{}) { x.s(fmt.Sprintf(s, args...)) } +func (x *genBuf) reset() { + if x.buf != nil { + x.buf = x.buf[:0] + } +} + +// genRunner holds some state used during a Gen run. 
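+//
+// For orientation, a hypothetical invocation sketch (MyType and the uid "1978" are
+// placeholders): genRunner is driven by Gen below, which the codecgen tool calls on a
+// build that has the codecgen.exec tag; library users do not call it directly:
+//
+//	var buf bytes.Buffer
+//	Gen(&buf, "", "mypkg", "1978", false, nil, reflect.TypeOf(MyType{}))
+//
+// A nil *TypeInfos falls back to the package defaults, and the uid suffixes the
+// identifiers in the generated file.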
+type genRunner struct { + w io.Writer // output + c uint64 // counter used for generating varsfx + t []reflect.Type // list of types to run selfer on + + tc reflect.Type // currently running selfer on this type + te map[uintptr]bool // types for which the encoder has been created + td map[uintptr]bool // types for which the decoder has been created + cp string // codec import path + + im map[string]reflect.Type // imports to add + imn map[string]string // package names of imports to add + imc uint64 // counter for import numbers + + is map[reflect.Type]struct{} // types seen during import search + bp string // base PkgPath, for which we are generating for + + cpfx string // codec package prefix + + tm map[reflect.Type]struct{} // types for which enc/dec must be generated + ts []reflect.Type // types for which enc/dec must be generated + + xs string // top level variable/constant suffix + hn string // fn helper type name + + ti *TypeInfos + // rr *rand.Rand // random generator for file-specific types + + nx bool // no extensions +} + +// Gen will write a complete go file containing Selfer implementations for each +// type passed. All the types must be in the same package. +// +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE. +func Gen(w io.Writer, buildTags, pkgName, uid string, noExtensions bool, + ti *TypeInfos, typ ...reflect.Type) { + // All types passed to this method do not have a codec.Selfer method implemented directly. + // codecgen already checks the AST and skips any types that define the codec.Selfer methods. + // Consequently, there's no need to check and trim them if they implement codec.Selfer + + if len(typ) == 0 { + return + } + x := genRunner{ + w: w, + t: typ, + te: make(map[uintptr]bool), + td: make(map[uintptr]bool), + im: make(map[string]reflect.Type), + imn: make(map[string]string), + is: make(map[reflect.Type]struct{}), + tm: make(map[reflect.Type]struct{}), + ts: []reflect.Type{}, + bp: genImportPath(typ[0]), + xs: uid, + ti: ti, + nx: noExtensions, + } + if x.ti == nil { + x.ti = defTypeInfos + } + if x.xs == "" { + rr := rand.New(rand.NewSource(time.Now().UnixNano())) + x.xs = strconv.FormatInt(rr.Int63n(9999), 10) + } + + // gather imports first: + x.cp = genImportPath(reflect.TypeOf(x)) + x.imn[x.cp] = genCodecPkg + for _, t := range typ { + // fmt.Printf("###########: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) + if genImportPath(t) != x.bp { + panic(errGenAllTypesSamePkg) + } + x.genRefPkgs(t) + } + if buildTags != "" { + x.line("// +build " + buildTags) + x.line("") + } + x.line(` + +// Code generated by codecgen - DO NOT EDIT. + +`) + x.line("package " + pkgName) + x.line("") + x.line("import (") + if x.cp != x.bp { + x.cpfx = genCodecPkg + "." 
+ x.linef("%s \"%s\"", genCodecPkg, x.cp) + } + // use a sorted set of im keys, so that we can get consistent output + imKeys := make([]string, 0, len(x.im)) + for k := range x.im { + imKeys = append(imKeys, k) + } + sort.Strings(imKeys) + for _, k := range imKeys { // for k, _ := range x.im { + if k == x.imn[k] { + x.linef("\"%s\"", k) + } else { + x.linef("%s \"%s\"", x.imn[k], k) + } + } + // add required packages + for _, k := range [...]string{"runtime", "errors", "strconv"} { // "reflect", "fmt" + if _, ok := x.im[k]; !ok { + x.line("\"" + k + "\"") + } + } + x.line(")") + x.line("") + + x.line("const (") + x.linef("// ----- content types ----") + x.linef("codecSelferCcUTF8%s = %v", x.xs, int64(cUTF8)) + x.linef("codecSelferCcRAW%s = %v", x.xs, int64(cRAW)) + x.linef("// ----- value types used ----") + for _, vt := range [...]valueType{ + valueTypeArray, valueTypeMap, valueTypeString, + valueTypeInt, valueTypeUint, valueTypeFloat} { + x.linef("codecSelferValueType%s%s = %v", vt.String(), x.xs, int64(vt)) + } + + x.linef("codecSelferBitsize%s = uint8(32 << (^uint(0) >> 63))", x.xs) + x.line(")") + x.line("var (") + x.line("errCodecSelferOnlyMapOrArrayEncodeToStruct" + x.xs + " = errors.New(`only encoded map or array can be decoded into a struct`)") + x.line(")") + x.line("") + + x.hn = "codecSelfer" + x.xs + x.line("type " + x.hn + " struct{}") + x.line("") + + x.varsfxreset() + x.line("func init() {") + x.linef("if %sGenVersion != %v {", x.cpfx, genVersion) + x.line("_, file, _, _ := runtime.Caller(0)") + x.outf(`panic("codecgen version mismatch: current: %v, need " + strconv.FormatInt(int64(%sGenVersion), 10) + ". Re-generate file: " + file)`, genVersion, x.cpfx) + // x.out(`panic(fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", `) + // x.linef(`%v, %sGenVersion, file))`, genVersion, x.cpfx) + x.linef("}") + x.line("if false { var _ byte = 0; // reference the types, but skip this branch at build/run time") + // x.line("_ = strconv.ParseInt") + var n int + // for k, t := range x.im { + for _, k := range imKeys { + t := x.im[k] + x.linef("var v%v %s.%s", n, x.imn[k], t.Name()) + n++ + } + if n > 0 { + x.out("_") + for i := 1; i < n; i++ { + x.out(", _") + } + x.out(" = v0") + for i := 1; i < n; i++ { + x.outf(", v%v", i) + } + } + x.line("} ") // close if false + x.line("}") // close init + x.line("") + + // generate rest of type info + for _, t := range typ { + x.tc = t + x.selfer(true) + x.selfer(false) + } + + for _, t := range x.ts { + rtid := rt2id(t) + // generate enc functions for all these slice/map types. + x.varsfxreset() + x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx) + x.genRequiredMethodVars(true) + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + x.encListFallback("v", t) + case reflect.Map: + x.encMapFallback("v", t) + default: + panic(errGenExpectArrayOrMap) + } + x.line("}") + x.line("") + + // generate dec functions for all these slice/map types. 
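+ // (illustrative sketch: for a registered slice type []T and helper type
+ // codecSelfer1978, the lines below emit a method shaped like
+ //	func (x codecSelfer1978) decSliceT(v *[]T, d *codec1978.Decoder) { ... }
+ // mirroring the enc form generated just above; the exact method name comes
+ // from genMethodNameT and varies with the type.)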
+ x.varsfxreset() + x.linef("func (x %s) dec%s(v *%s, d *%sDecoder) {", x.hn, x.genMethodNameT(t), x.genTypeName(t), x.cpfx) + x.genRequiredMethodVars(false) + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + x.decListFallback("v", rtid, t) + case reflect.Map: + x.decMapFallback("v", rtid, t) + default: + panic(errGenExpectArrayOrMap) + } + x.line("}") + x.line("") + } + + x.line("") +} + +func (x *genRunner) checkForSelfer(t reflect.Type, varname string) bool { + // return varname != genTopLevelVarName && t != x.tc + // the only time we checkForSelfer is if we are not at the TOP of the generated code. + return varname != genTopLevelVarName +} + +func (x *genRunner) arr2str(t reflect.Type, s string) string { + if t.Kind() == reflect.Array { + return s + } + return "" +} + +func (x *genRunner) genRequiredMethodVars(encode bool) { + x.line("var h " + x.hn) + if encode { + x.line("z, r := " + x.cpfx + "GenHelperEncoder(e)") + } else { + x.line("z, r := " + x.cpfx + "GenHelperDecoder(d)") + } + x.line("_, _, _ = h, z, r") +} + +func (x *genRunner) genRefPkgs(t reflect.Type) { + if _, ok := x.is[t]; ok { + return + } + x.is[t] = struct{}{} + tpkg, tname := genImportPath(t), t.Name() + if tpkg != "" && tpkg != x.bp && tpkg != x.cp && tname != "" && tname[0] >= 'A' && tname[0] <= 'Z' { + if _, ok := x.im[tpkg]; !ok { + x.im[tpkg] = t + if idx := strings.LastIndex(tpkg, "/"); idx < 0 { + x.imn[tpkg] = tpkg + } else { + x.imc++ + x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + genGoIdentifier(tpkg[idx+1:], false) + } + } + } + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Ptr, reflect.Chan: + x.genRefPkgs(t.Elem()) + case reflect.Map: + x.genRefPkgs(t.Elem()) + x.genRefPkgs(t.Key()) + case reflect.Struct: + for i := 0; i < t.NumField(); i++ { + if fname := t.Field(i).Name; fname != "" && fname[0] >= 'A' && fname[0] <= 'Z' { + x.genRefPkgs(t.Field(i).Type) + } + } + } +} + +func (x *genRunner) varsfx() string { + x.c++ + return strconv.FormatUint(x.c, 10) +} + +func (x *genRunner) varsfxreset() { + x.c = 0 +} + +func (x *genRunner) out(s string) { + _, err := io.WriteString(x.w, s) + if err != nil { + panic(err) + } +} + +func (x *genRunner) outf(s string, params ...interface{}) { + _, err := fmt.Fprintf(x.w, s, params...) + if err != nil { + panic(err) + } +} + +func (x *genRunner) line(s string) { + x.out(s) + if len(s) == 0 || s[len(s)-1] != '\n' { + x.out("\n") + } +} + +func (x *genRunner) linef(s string, params ...interface{}) { + x.outf(s, params...) + if len(s) == 0 || s[len(s)-1] != '\n' { + x.out("\n") + } +} + +func (x *genRunner) genTypeName(t reflect.Type) (n string) { + // defer func() { fmt.Printf(">>>> ####: genTypeName: t: %v, name: '%s'\n", t, n) }() + + // if the type has a PkgPath, which doesn't match the current package, + // then include it. 
+ // We cannot depend on t.String() because it includes current package, + // or t.PkgPath because it includes full import path, + // + var ptrPfx string + for t.Kind() == reflect.Ptr { + ptrPfx += "*" + t = t.Elem() + } + if tn := t.Name(); tn != "" { + return ptrPfx + x.genTypeNamePrim(t) + } + switch t.Kind() { + case reflect.Map: + return ptrPfx + "map[" + x.genTypeName(t.Key()) + "]" + x.genTypeName(t.Elem()) + case reflect.Slice: + return ptrPfx + "[]" + x.genTypeName(t.Elem()) + case reflect.Array: + return ptrPfx + "[" + strconv.FormatInt(int64(t.Len()), 10) + "]" + x.genTypeName(t.Elem()) + case reflect.Chan: + return ptrPfx + t.ChanDir().String() + " " + x.genTypeName(t.Elem()) + default: + if t == intfTyp { + return ptrPfx + "interface{}" + } else { + return ptrPfx + x.genTypeNamePrim(t) + } + } +} + +func (x *genRunner) genTypeNamePrim(t reflect.Type) (n string) { + if t.Name() == "" { + return t.String() + } else if genImportPath(t) == "" || genImportPath(t) == genImportPath(x.tc) { + return t.Name() + } else { + return x.imn[genImportPath(t)] + "." + t.Name() + // return t.String() // best way to get the package name inclusive + } +} + +func (x *genRunner) genZeroValueR(t reflect.Type) string { + // if t is a named type, w + switch t.Kind() { + case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func, + reflect.Slice, reflect.Map, reflect.Invalid: + return "nil" + case reflect.Bool: + return "false" + case reflect.String: + return `""` + case reflect.Struct, reflect.Array: + return x.genTypeName(t) + "{}" + default: // all numbers + return "0" + } +} + +func (x *genRunner) genMethodNameT(t reflect.Type) (s string) { + return genMethodNameT(t, x.tc) +} + +func (x *genRunner) selfer(encode bool) { + t := x.tc + t0 := t + // always make decode use a pointer receiver, + // and structs/arrays always use a ptr receiver (encode|decode) + isptr := !encode || t.Kind() == reflect.Array || (t.Kind() == reflect.Struct && t != timeTyp) + x.varsfxreset() + + fnSigPfx := "func (" + genTopLevelVarName + " " + if isptr { + fnSigPfx += "*" + } + fnSigPfx += x.genTypeName(t) + x.out(fnSigPfx) + + if isptr { + t = reflect.PtrTo(t) + } + if encode { + x.line(") CodecEncodeSelf(e *" + x.cpfx + "Encoder) {") + x.genRequiredMethodVars(true) + x.encVar(genTopLevelVarName, t) + } else { + x.line(") CodecDecodeSelf(d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + // do not use decVar, as there is no need to check TryDecodeAsNil + // or way to elegantly handle that, and also setting it to a + // non-nil value doesn't affect the pointer passed. 
+ // x.decVar(genTopLevelVarName, t, false) + x.dec(genTopLevelVarName, t0, true) + } + x.line("}") + x.line("") + + if encode || t0.Kind() != reflect.Struct { + return + } + + // write is containerMap + if genUseOneFunctionForDecStructMap { + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMap(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleConsolidated) + x.line("}") + x.line("") + } else { + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMapLenPrefix(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleLenPrefix) + x.line("}") + x.line("") + + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMapCheckBreak(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleCheckBreak) + x.line("}") + x.line("") + } + + // write containerArray + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromArray(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructArray(genTopLevelVarName, "l", "return", rt2id(t0), t0) + x.line("}") + x.line("") + +} + +// used for chan, array, slice, map +func (x *genRunner) xtraSM(varname string, t reflect.Type, encode, isptr bool) { + var ptrPfx, addrPfx string + if isptr { + ptrPfx = "*" + } else { + addrPfx = "&" + } + if encode { + x.linef("h.enc%s((%s%s)(%s), e)", x.genMethodNameT(t), ptrPfx, x.genTypeName(t), varname) + } else { + x.linef("h.dec%s((*%s)(%s%s), d)", x.genMethodNameT(t), x.genTypeName(t), addrPfx, varname) + } + x.registerXtraT(t) +} + +func (x *genRunner) registerXtraT(t reflect.Type) { + // recursively register the types + if _, ok := x.tm[t]; ok { + return + } + var tkey reflect.Type + switch t.Kind() { + case reflect.Chan, reflect.Slice, reflect.Array: + case reflect.Map: + tkey = t.Key() + default: + return + } + x.tm[t] = struct{}{} + x.ts = append(x.ts, t) + // check if this refers to any xtra types eg. a slice of array: add the array + x.registerXtraT(t.Elem()) + if tkey != nil { + x.registerXtraT(tkey) + } +} + +// encVar will encode a variable. +// The parameter, t, is the reflect.Type of the variable itself +func (x *genRunner) encVar(varname string, t reflect.Type) { + // fmt.Printf(">>>>>> varname: %s, t: %v\n", varname, t) + var checkNil bool + switch t.Kind() { + case reflect.Ptr, reflect.Interface, reflect.Slice, reflect.Map, reflect.Chan: + checkNil = true + } + if checkNil { + x.linef("if %s == nil { r.EncodeNil() } else { ", varname) + } + + switch t.Kind() { + case reflect.Ptr: + telem := t.Elem() + tek := telem.Kind() + if tek == reflect.Array || (tek == reflect.Struct && telem != timeTyp) { + x.enc(varname, genNonPtr(t)) + break + } + i := x.varsfx() + x.line(genTempVarPfx + i + " := *" + varname) + x.enc(genTempVarPfx+i, genNonPtr(t)) + case reflect.Struct, reflect.Array: + if t == timeTyp { + x.enc(varname, t) + break + } + i := x.varsfx() + x.line(genTempVarPfx + i + " := &" + varname) + x.enc(genTempVarPfx+i, t) + default: + x.enc(varname, t) + } + + if checkNil { + x.line("}") + } + +} + +// enc will encode a variable (varname) of type t, where t represents T. 
+// if t is !time.Time and t is of kind reflect.Struct or reflect.Array, varname is of type *T +// (to prevent copying), +// else t is of type T +func (x *genRunner) enc(varname string, t reflect.Type) { + rtid := rt2id(t) + ti2 := x.ti.get(rtid, t) + // We call CodecEncodeSelf if one of the following are honored: + // - the type already implements Selfer, call that + // - the type has a Selfer implementation just created, use that + // - the type is in the list of the ones we will generate for, but it is not currently being generated + + mi := x.varsfx() + // tptr := reflect.PtrTo(t) + tk := t.Kind() + if x.checkForSelfer(t, varname) { + if tk == reflect.Array || (tk == reflect.Struct && rtid != timeTypId) { // varname is of type *T + // if tptr.Implements(selferTyp) || t.Implements(selferTyp) { + if ti2.isFlag(typeInfoFlagIsZeroerPtr) || ti2.isFlag(typeInfoFlagIsZeroer) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } + } else { // varname is of type T + if ti2.cs { // t.Implements(selferTyp) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } else if ti2.csp { // tptr.Implements(selferTyp) { + x.linef("%ssf%s := &%s", genTempVarPfx, mi, varname) + x.linef("%ssf%s.CodecEncodeSelf(e)", genTempVarPfx, mi) + return + } + } + + if _, ok := x.te[rtid]; ok { + x.line(varname + ".CodecEncodeSelf(e)") + return + } + } + + inlist := false + for _, t0 := range x.t { + if t == t0 { + inlist = true + if x.checkForSelfer(t, varname) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } + break + } + } + + var rtidAdded bool + if t == x.tc { + x.te[rtid] = true + rtidAdded = true + } + + // check if + // - type is time.Time, RawExt, Raw + // - the type implements (Text|JSON|Binary)(Unm|M)arshal + + x.line("if false {") //start if block + defer func() { x.line("}") }() //end if block + + if t == timeTyp { + x.linef("} else if !z.EncBasicHandle().TimeNotBuiltin { r.EncodeTime(%s)", varname) + // return + } + if t == rawTyp { + x.linef("} else { z.EncRaw(%s)", varname) + return + } + if t == rawExtTyp { + x.linef("} else { r.EncodeRawExt(%s, e)", varname) + return + } + // only check for extensions if the type is named, and has a packagePath. 
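+ // (illustrative sketch: with varname "x" and a temp-var suffix of, say, 1, the
+ // linef call below writes
+ //	} else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { z.EncExtension(x, yyxt1)
+ // into the generated method body.)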
var arrayOrStruct = tk == reflect.Array || tk == reflect.Struct // meaning varname is of type *T
+ if !x.nx && genImportPath(t) != "" && t.Name() != "" {
+ yy := fmt.Sprintf("%sxt%s", genTempVarPfx, mi)
+ x.linef("} else if %s := z.Extension(z.I2Rtid(%s)); %s != nil { z.EncExtension(%s, %s) ", yy, varname, yy, varname, yy)
+ }
+ if arrayOrStruct { // varname is of type *T
+ if ti2.bm || ti2.bmp { // t.Implements(binaryMarshalerTyp) || tptr.Implements(binaryMarshalerTyp) {
+ x.linef("} else if z.EncBinary() { z.EncBinaryMarshal(%v) ", varname)
+ }
+ if ti2.jm || ti2.jmp { // t.Implements(jsonMarshalerTyp) || tptr.Implements(jsonMarshalerTyp) {
+ x.linef("} else if !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", varname)
+ } else if ti2.tm || ti2.tmp { // t.Implements(textMarshalerTyp) || tptr.Implements(textMarshalerTyp) {
+ x.linef("} else if !z.EncBinary() { z.EncTextMarshal(%v) ", varname)
+ }
+ } else { // varname is of type T
+ if ti2.bm { // t.Implements(binaryMarshalerTyp) {
+ x.linef("} else if z.EncBinary() { z.EncBinaryMarshal(%v) ", varname)
+ } else if ti2.bmp { // tptr.Implements(binaryMarshalerTyp) {
+ x.linef("} else if z.EncBinary() { z.EncBinaryMarshal(&%v) ", varname)
+ }
+ if ti2.jm { // t.Implements(jsonMarshalerTyp) {
+ x.linef("} else if !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", varname)
+ } else if ti2.jmp { // tptr.Implements(jsonMarshalerTyp) {
+ x.linef("} else if !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(&%v) ", varname)
+ } else if ti2.tm { // t.Implements(textMarshalerTyp) {
+ x.linef("} else if !z.EncBinary() { z.EncTextMarshal(%v) ", varname)
+ } else if ti2.tmp { // tptr.Implements(textMarshalerTyp) {
+ x.linef("} else if !z.EncBinary() { z.EncTextMarshal(&%v) ", varname)
+ }
+ }
+ x.line("} else {")
+
+ switch t.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ x.line("r.EncodeInt(int64(" + varname + "))")
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ x.line("r.EncodeUint(uint64(" + varname + "))")
+ case reflect.Float32:
+ x.line("r.EncodeFloat32(float32(" + varname + "))")
+ case reflect.Float64:
+ x.line("r.EncodeFloat64(float64(" + varname + "))")
+ case reflect.Bool:
+ x.line("r.EncodeBool(bool(" + varname + "))")
+ case reflect.String:
+ x.linef("if z.EncBasicHandle().StringToRaw { r.EncodeStringBytesRaw(z.BytesView(string(%s))) } else { r.EncodeStringEnc(codecSelferCcUTF8%s, string(%s)) }", varname, x.xs, varname)
+ case reflect.Chan:
+ x.xtraSM(varname, t, true, false)
+ // x.encListFallback(varname, rtid, t)
+ case reflect.Array:
+ x.xtraSM(varname, t, true, true)
+ case reflect.Slice:
+ // if nil, call dedicated function
+ // if a []uint8, call dedicated function
+ // if a known fastpath slice, call dedicated function
+ // else write encode function in-line.
+ // - if elements are primitives or Selfers, call dedicated function on each member.
+ // - else call Encoder.encode(XXX) on it.
+ if rtid == uint8SliceTypId {
+ x.line("r.EncodeStringBytesRaw([]byte(" + varname + "))")
+ } else if fastpathAV.index(rtid) != -1 {
+ g := x.newGenV(t)
+ x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)")
+ } else {
+ x.xtraSM(varname, t, true, false)
+ // x.encListFallback(varname, rtid, t)
+ }
+ case reflect.Map:
+ // if nil, call dedicated function
+ // if a known fastpath map, call dedicated function
+ // else write encode function in-line. 
+ // - if elements are primitives or Selfers, call dedicated function on each member.
+ // - else call Encoder.encode(XXX) on it.
+ // x.line("if " + varname + " == nil { \nr.EncodeNil()\n } else { ")
+ if fastpathAV.index(rtid) != -1 {
+ g := x.newGenV(t)
+ x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)")
+ } else {
+ x.xtraSM(varname, t, true, false)
+ // x.encMapFallback(varname, rtid, t)
+ }
+ case reflect.Struct:
+ if !inlist {
+ delete(x.te, rtid)
+ x.line("z.EncFallback(" + varname + ")")
+ break
+ }
+ x.encStruct(varname, rtid, t)
+ default:
+ if rtidAdded {
+ delete(x.te, rtid)
+ }
+ x.line("z.EncFallback(" + varname + ")")
+ }
+}
+
+func (x *genRunner) encZero(t reflect.Type) {
+ switch t.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ x.line("r.EncodeInt(0)")
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ x.line("r.EncodeUint(0)")
+ case reflect.Float32:
+ x.line("r.EncodeFloat32(0)")
+ case reflect.Float64:
+ x.line("r.EncodeFloat64(0)")
+ case reflect.Bool:
+ x.line("r.EncodeBool(false)")
+ case reflect.String:
+ x.linef(`if z.EncBasicHandle().StringToRaw { r.EncodeStringBytesRaw([]byte{}) } else { r.EncodeStringEnc(codecSelferCcUTF8%s, "") }`, x.xs)
+ default:
+ x.line("r.EncodeNil()")
+ }
+}
+
+func (x *genRunner) encOmitEmptyLine(t2 reflect.StructField, varname string, buf *genBuf) {
+ // smartly check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc.
+ // also, for maps/slices/arrays, check if len != 0 (not if == zero value)
+ varname2 := varname + "." + t2.Name
+ switch t2.Type.Kind() {
+ case reflect.Struct:
+ rtid2 := rt2id(t2.Type)
+ ti2 := x.ti.get(rtid2, t2.Type)
+ // fmt.Printf(">>>> structfield: omitempty: type: %s, field: %s\n", t2.Type.Name(), t2.Name)
+ if ti2.rtid == timeTypId {
+ buf.s("!(").s(varname2).s(".IsZero())")
+ break
+ }
+ if ti2.isFlag(typeInfoFlagIsZeroerPtr) || ti2.isFlag(typeInfoFlagIsZeroer) {
+ buf.s("!(").s(varname2).s(".IsZero())")
+ break
+ }
+ if ti2.isFlag(typeInfoFlagComparable) {
+ buf.s(varname2).s(" != ").s(x.genZeroValueR(t2.Type))
+ break
+ }
+ // buf.s("(")
+ buf.s("false")
+ for i, n := 0, t2.Type.NumField(); i < n; i++ {
+ f := t2.Type.Field(i)
+ if f.PkgPath != "" { // unexported
+ continue
+ }
+ buf.s(" || ")
+ x.encOmitEmptyLine(f, varname2, buf)
+ }
+ //buf.s(")")
+ case reflect.Bool:
+ buf.s(varname2)
+ case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan:
+ buf.s("len(").s(varname2).s(") != 0")
+ default:
+ buf.s(varname2).s(" != ").s(x.genZeroValueR(t2.Type))
+ }
+}
+
+func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
+ // Use knowledge from structfieldinfo (mbs, encodable fields. Ignore omitempty.)
+ // replicate code in kStruct i.e. for each field, deref type to non-pointer, and call x.enc on it
+
+ // if t === type currently running selfer on, do for all
+ ti := x.ti.get(rtid, t)
+ i := x.varsfx()
+ sepVarname := genTempVarPfx + "sep" + i
+ numfieldsvar := genTempVarPfx + "q" + i
+ ti2arrayvar := genTempVarPfx + "r" + i
+ struct2arrvar := genTempVarPfx + "2arr" + i
+
+ x.line(sepVarname + " := !z.EncBinary()")
+ x.linef("%s := z.EncBasicHandle().StructToArray", struct2arrvar)
+ x.linef("_, _ = %s, %s", sepVarname, struct2arrvar)
+ x.linef("const %s bool = %v // struct tag has 'toArray'", ti2arrayvar, ti.toArray)
+
+ tisfi := ti.sfiSrc // always use sequence from file. decStruct expects same thing. 
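+ // (illustrative sketch: when any field is tagged omitempty, the block below first
+ // emits a bool array that gates each field, shaped roughly like
+ //	var yyq2 = [3]bool{true, x.B != "", len(x.C) != 0}
+ // with one entry per field in tisfi order, so that the map length can be computed
+ // before any key/value pair is written; field names here are hypothetical.)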
+ + // var nn int + // due to omitEmpty, we need to calculate the + // number of non-empty things we write out first. + // This is required as we need to pre-determine the size of the container, + // to support length-prefixing. + if ti.anyOmitEmpty { + x.linef("var %s = [%v]bool{ // should field at this index be written?", numfieldsvar, len(tisfi)) + + for j, si := range tisfi { + _ = j + if !si.omitEmpty() { + // x.linef("%s[%v] = true // %s", numfieldsvar, j, si.fieldName) + x.linef("true, // %s", si.fieldName) + // nn++ + continue + } + var t2 reflect.StructField + var omitline genBuf + { + t2typ := t + varname3 := varname + // go through the loop, record the t2 field explicitly, + // and gather the omit line if embedded in pointers. + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(int(ix)) + t2typ = t2.Type + varname3 = varname3 + "." + t2.Name + // do not include actual field in the omit line. + // that is done subsequently (right after - below). + if uint8(ij+1) < si.nis && t2typ.Kind() == reflect.Ptr { + omitline.s(varname3).s(" != nil && ") + } + } + } + x.encOmitEmptyLine(t2, varname, &omitline) + x.linef("%s, // %s", omitline.v(), si.fieldName) + } + x.line("}") + x.linef("_ = %s", numfieldsvar) + } + // x.linef("var %snn%s int", genTempVarPfx, i) + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { + x.linef("r.WriteArrayStart(%d)", len(tisfi)) + x.linef("} else {") // if not ti.toArray + if ti.anyOmitEmpty { + // nn = 0 + // x.linef("var %snn%s = %v", genTempVarPfx, i, nn) + x.linef("var %snn%s int", genTempVarPfx, i) + x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i) + x.linef("r.WriteMapStart(%snn%s)", genTempVarPfx, i) + x.linef("%snn%s = %v", genTempVarPfx, i, 0) + } else { + x.linef("r.WriteMapStart(%d)", len(tisfi)) + } + x.line("}") // close if not StructToArray + + for j, si := range tisfi { + i := x.varsfx() + isNilVarName := genTempVarPfx + "n" + i + var labelUsed bool + var t2 reflect.StructField + { + t2typ := t + varname3 := varname + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(int(ix)) + t2typ = t2.Type + varname3 = varname3 + "." 
+ t2.Name + if t2typ.Kind() == reflect.Ptr { + if !labelUsed { + x.line("var " + isNilVarName + " bool") + } + x.line("if " + varname3 + " == nil { " + isNilVarName + " = true ") + x.line("goto LABEL" + i) + x.line("}") + labelUsed = true + // "varname3 = new(" + x.genTypeName(t3.Elem()) + ") }") + } + } + // t2 = t.FieldByIndex(si.is) + } + if labelUsed { + x.line("LABEL" + i + ":") + } + // if the type of the field is a Selfer, or one of the ones + + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray + if labelUsed { + x.linef("if %s { r.WriteArrayElem(); r.EncodeNil() } else { ", isNilVarName) + } + x.line("r.WriteArrayElem()") + if si.omitEmpty() { + x.linef("if %s[%v] {", numfieldsvar, j) + } + x.encVar(varname+"."+t2.Name, t2.Type) + if si.omitEmpty() { + x.linef("} else {") + x.encZero(t2.Type) + x.linef("}") + } + if labelUsed { + x.line("}") + } + + x.linef("} else {") // if not ti.toArray + + if si.omitEmpty() { + x.linef("if %s[%v] {", numfieldsvar, j) + } + x.line("r.WriteMapElemKey()") + + // emulate EncStructFieldKey + switch ti.keyType { + case valueTypeInt: + x.linef("r.EncodeInt(z.M.Int(strconv.ParseInt(`%s`, 10, 64)))", si.encName) + case valueTypeUint: + x.linef("r.EncodeUint(z.M.Uint(strconv.ParseUint(`%s`, 10, 64)))", si.encName) + case valueTypeFloat: + x.linef("r.EncodeFloat64(z.M.Float(strconv.ParseFloat(`%s`, 64)))", si.encName) + default: // string + if si.encNameAsciiAlphaNum { + x.linef(`if z.IsJSONHandle() { z.WriteStr("\"%s\"") } else { `, si.encName) + } + x.linef("r.EncodeStringEnc(codecSelferCcUTF8%s, `%s`)", x.xs, si.encName) + if si.encNameAsciiAlphaNum { + x.linef("}") + } + } + // x.linef("r.EncStructFieldKey(codecSelferValueType%s%s, `%s`)", ti.keyType.String(), x.xs, si.encName) + x.line("r.WriteMapElemValue()") + if labelUsed { + x.line("if " + isNilVarName + " { r.EncodeNil() } else { ") + x.encVar(varname+"."+t2.Name, t2.Type) + x.line("}") + } else { + x.encVar(varname+"."+t2.Name, t2.Type) + } + if si.omitEmpty() { + x.line("}") + } + x.linef("} ") // end if/else ti.toArray + } + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { + x.line("r.WriteArrayEnd()") + x.line("} else {") + x.line("r.WriteMapEnd()") + x.line("}") + +} + +func (x *genRunner) encListFallback(varname string, t reflect.Type) { + elemBytes := t.Elem().Kind() == reflect.Uint8 + if t.AssignableTo(uint8SliceTyp) { + x.linef("r.EncodeStringBytesRaw([]byte(%s))", varname) + return + } + if t.Kind() == reflect.Array && elemBytes { + x.linef("r.EncodeStringBytesRaw(((*[%d]byte)(%s))[:])", t.Len(), varname) + return + } + i := x.varsfx() + if t.Kind() == reflect.Chan { + type ts struct { + Label, Chan, Slice, Sfx string + } + tm, err := template.New("").Parse(genEncChanTmpl) + if err != nil { + panic(err) + } + x.linef("if %s == nil { r.EncodeNil() } else { ", varname) + x.linef("var sch%s []%s", i, x.genTypeName(t.Elem())) + err = tm.Execute(x.w, &ts{"Lsch" + i, varname, "sch" + i, i}) + if err != nil { + panic(err) + } + // x.linef("%s = sch%s", varname, i) + if elemBytes { + x.linef("r.EncodeStringBytesRaw([]byte(%s))", "sch"+i) + x.line("}") + return + } + varname = "sch" + i + } + + x.line("r.WriteArrayStart(len(" + varname + "))") + x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname) + x.line("r.WriteArrayElem()") + + x.encVar(genTempVarPfx+"v"+i, t.Elem()) + x.line("}") + x.line("r.WriteArrayEnd()") + if t.Kind() == reflect.Chan { + x.line("}") + } +} + +func (x *genRunner) encMapFallback(varname string, t reflect.Type) { + 
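+	// The fallback emits a plain range loop over the map. A sketch of the generated
+	// code (assuming "yy" as genTempVarPfx and "1" as the varsfx):
+	//
+	//	r.WriteMapStart(len(m))
+	//	for yyk1, yyv1 := range m {
+	//		r.WriteMapElemKey()
+	//		... encode yyk1 ...
+	//		r.WriteMapElemValue()
+	//		... encode yyv1 ...
+	//	}
+	//	r.WriteMapEnd()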
// TODO: expand this to handle canonical. + i := x.varsfx() + x.line("r.WriteMapStart(len(" + varname + "))") + x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname) + x.line("r.WriteMapElemKey()") + x.encVar(genTempVarPfx+"k"+i, t.Key()) + x.line("r.WriteMapElemValue()") + x.encVar(genTempVarPfx+"v"+i, t.Elem()) + x.line("}") + x.line("r.WriteMapEnd()") +} + +func (x *genRunner) decVarInitPtr(varname, nilvar string, t reflect.Type, si *structFieldInfo, + newbuf, nilbuf *genBuf) (t2 reflect.StructField) { + //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. + // t2 = t.FieldByIndex(si.is) + t2typ := t + varname3 := varname + t2kind := t2typ.Kind() + var nilbufed bool + if si != nil { + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(int(ix)) + t2typ = t2.Type + varname3 = varname3 + "." + t2.Name + t2kind = t2typ.Kind() + if t2kind != reflect.Ptr { + continue + } + if newbuf != nil { + newbuf.f("if %s == nil { %s = new(%s) }\n", varname3, varname3, x.genTypeName(t2typ.Elem())) + } + if nilbuf != nil { + if !nilbufed { + nilbuf.s("if true") + nilbufed = true + } + nilbuf.s(" && ").s(varname3).s(" != nil") + } + } + } + // if t2typ.Kind() == reflect.Ptr { + // varname3 = varname3 + t2.Name + // } + if nilbuf != nil { + if nilbufed { + nilbuf.s(" { ") + } + if nilvar != "" { + nilbuf.s(nilvar).s(" = true") + } else if tk := t2typ.Kind(); tk == reflect.Ptr { + if strings.IndexByte(varname3, '.') != -1 || strings.IndexByte(varname3, '[') != -1 { + nilbuf.s(varname3).s(" = nil") + } else { + nilbuf.s("*").s(varname3).s(" = ").s(x.genZeroValueR(t2typ.Elem())) + } + } else { + nilbuf.s(varname3).s(" = ").s(x.genZeroValueR(t2typ)) + } + if nilbufed { + nilbuf.s("}") + } + } + return t2 +} + +// decVar takes a variable called varname, of type t +func (x *genRunner) decVarMain(varname, rand string, t reflect.Type, checkNotNil bool) { + // We only encode as nil if a nillable value. + // This removes some of the wasted checks for TryDecodeAsNil. + // We need to think about this more, to see what happens if omitempty, etc + // cause a nil value to be stored when something is expected. + // This could happen when decoding from a struct encoded as an array. + // For that, decVar should be called with canNil=true, to force true as its value. + var varname2 string + if t.Kind() != reflect.Ptr { + if t.PkgPath() != "" || !x.decTryAssignPrimitive(varname, t, false) { + x.dec(varname, t, false) + } + } else { + if checkNotNil { + x.linef("if %s == nil { %s = new(%s) }", varname, varname, x.genTypeName(t.Elem())) + } + // Ensure we set underlying ptr to a non-nil value (so we can deref to it later). + // There's a chance of a **T in here which is nil. + var ptrPfx string + for t = t.Elem(); t.Kind() == reflect.Ptr; t = t.Elem() { + ptrPfx += "*" + if checkNotNil { + x.linef("if %s%s == nil { %s%s = new(%s)}", + ptrPfx, varname, ptrPfx, varname, x.genTypeName(t)) + } + } + // Should we create temp var if a slice/map indexing? No. dec(...) can now handle it. 
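+	// e.g. with checkNotNil set, a field of type **T yields generated code like (sketch):
+	//
+	//	if x.F == nil { x.F = new(*T) }
+	//	if *x.F == nil { *x.F = new(T) }
+	//
+	// so the dereference handed to dec(...) below always points at an allocated T.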
+ + if ptrPfx == "" { + x.dec(varname, t, true) + } else { + varname2 = genTempVarPfx + "z" + rand + x.line(varname2 + " := " + ptrPfx + varname) + x.dec(varname2, t, true) + } + } +} + +// decVar takes a variable called varname, of type t +func (x *genRunner) decVar(varname, nilvar string, t reflect.Type, canBeNil, checkNotNil bool) { + i := x.varsfx() + + // We only encode as nil if a nillable value. + // This removes some of the wasted checks for TryDecodeAsNil. + // We need to think about this more, to see what happens if omitempty, etc + // cause a nil value to be stored when something is expected. + // This could happen when decoding from a struct encoded as an array. + // For that, decVar should be called with canNil=true, to force true as its value. + + if !canBeNil { + canBeNil = genAnythingCanBeNil || !genIsImmutable(t) + } + + if canBeNil { + var buf genBuf + x.decVarInitPtr(varname, nilvar, t, nil, nil, &buf) + x.linef("if r.TryDecodeAsNil() { %s } else {", buf.buf) + } else { + x.line("// cannot be nil") + } + + x.decVarMain(varname, i, t, checkNotNil) + + if canBeNil { + x.line("} ") + } +} + +// dec will decode a variable (varname) of type t or ptrTo(t) if isptr==true. +// t is always a basetype (i.e. not of kind reflect.Ptr). +func (x *genRunner) dec(varname string, t reflect.Type, isptr bool) { + // assumptions: + // - the varname is to a pointer already. No need to take address of it + // - t is always a baseType T (not a *T, etc). + rtid := rt2id(t) + ti2 := x.ti.get(rtid, t) + // tptr := reflect.PtrTo(t) + if x.checkForSelfer(t, varname) { + if ti2.cs || ti2.csp { // t.Implements(selferTyp) || tptr.Implements(selferTyp) { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + if _, ok := x.td[rtid]; ok { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + } + + inlist := false + for _, t0 := range x.t { + if t == t0 { + inlist = true + if x.checkForSelfer(t, varname) { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + break + } + } + + var rtidAdded bool + if t == x.tc { + x.td[rtid] = true + rtidAdded = true + } + + // check if + // - type is time.Time, Raw, RawExt + // - the type implements (Text|JSON|Binary)(Unm|M)arshal + + mi := x.varsfx() + // x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi) + // x.linef("_ = %sm%s", genTempVarPfx, mi) + x.line("if false {") //start if block + defer func() { x.line("}") }() //end if block + + var ptrPfx, addrPfx string + if isptr { + ptrPfx = "*" + } else { + addrPfx = "&" + } + if t == timeTyp { + x.linef("} else if !z.DecBasicHandle().TimeNotBuiltin { %s%v = r.DecodeTime()", ptrPfx, varname) + // return + } + if t == rawTyp { + x.linef("} else { %s%v = z.DecRaw()", ptrPfx, varname) + return + } + + if t == rawExtTyp { + x.linef("} else { r.DecodeExt(%s%v, 0, nil)", addrPfx, varname) + return + } + + // only check for extensions if the type is named, and has a packagePath. 
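+	// Together with the "if false {" opener above, the checks below emit one
+	// if/else-if chain in the generated method, e.g. (a sketch):
+	//
+	//	if false {
+	//	} else if yyxt1 := z.Extension(z.I2Rtid(x.F)); yyxt1 != nil { z.DecExtension(x.F, yyxt1)
+	//	} else if z.DecBinary() { z.DecBinaryUnmarshal(&x.F)
+	//	} else { /* kind-specific decode below */ }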
+	if !x.nx && genImportPath(t) != "" && t.Name() != "" {
+		// first check if extensions are configured, before doing the interface conversion
+		// x.linef("} else if z.HasExtensions() && z.DecExt(%s) {", varname)
+		yy := fmt.Sprintf("%sxt%s", genTempVarPfx, mi)
+		x.linef("} else if %s := z.Extension(z.I2Rtid(%s)); %s != nil { z.DecExtension(%s, %s) ", yy, varname, yy, varname, yy)
+	}
+
+	if ti2.bu || ti2.bup { // t.Implements(binaryUnmarshalerTyp) || tptr.Implements(binaryUnmarshalerTyp) {
+		x.linef("} else if z.DecBinary() { z.DecBinaryUnmarshal(%s%v) ", addrPfx, varname)
+	}
+	if ti2.ju || ti2.jup { // t.Implements(jsonUnmarshalerTyp) || tptr.Implements(jsonUnmarshalerTyp) {
+		x.linef("} else if !z.DecBinary() && z.IsJSONHandle() { z.DecJSONUnmarshal(%s%v)", addrPfx, varname)
+	} else if ti2.tu || ti2.tup { // t.Implements(textUnmarshalerTyp) || tptr.Implements(textUnmarshalerTyp) {
+		x.linef("} else if !z.DecBinary() { z.DecTextUnmarshal(%s%v)", addrPfx, varname)
+	}
+
+	x.line("} else {")
+
+	if x.decTryAssignPrimitive(varname, t, isptr) {
+		return
+	}
+
+	switch t.Kind() {
+	case reflect.Array, reflect.Chan:
+		x.xtraSM(varname, t, false, isptr)
+	case reflect.Slice:
+		// if a []uint8, call dedicated function
+		// if a known fastpath slice, call dedicated function
+		// else write decode function in-line.
+		// - if elements are primitives or Selfers, call dedicated function on each member.
+		// - else call Decoder.decode(XXX) on it.
+		if rtid == uint8SliceTypId {
+			x.linef("%s%s = r.DecodeBytes(%s(%s[]byte)(%s), false)",
+				ptrPfx, varname, ptrPfx, ptrPfx, varname)
+		} else if fastpathAV.index(rtid) != -1 {
+			g := x.newGenV(t)
+			x.linef("z.F.%sX(%s%s, d)", g.MethodNamePfx("Dec", false), addrPfx, varname)
+		} else {
+			x.xtraSM(varname, t, false, isptr)
+			// x.decListFallback(varname, rtid, false, t)
+		}
+	case reflect.Map:
+		// if a known fastpath map, call dedicated function
+		// else write decode function in-line.
+		// - if elements are primitives or Selfers, call dedicated function on each member.
+		// - else call Decoder.decode(XXX) on it.
+		if fastpathAV.index(rtid) != -1 {
+			g := x.newGenV(t)
+			x.linef("z.F.%sX(%s%s, d)", g.MethodNamePfx("Dec", false), addrPfx, varname)
+		} else {
+			x.xtraSM(varname, t, false, isptr)
+			// x.decMapFallback(varname, rtid, t)
+		}
+	case reflect.Struct:
+		if inlist {
+			// no need to create temp variable if isptr, or x.F or x[F]
+			if isptr || strings.IndexByte(varname, '.') != -1 || strings.IndexByte(varname, '[') != -1 {
+				x.decStruct(varname, rtid, t)
+			} else {
+				varname2 := genTempVarPfx + "j" + mi
+				x.line(varname2 + " := &" + varname)
+				x.decStruct(varname2, rtid, t)
+			}
+		} else {
+			// delete(x.td, rtid)
+			x.line("z.DecFallback(" + addrPfx + varname + ", false)")
+		}
+	default:
+		if rtidAdded {
+			delete(x.te, rtid)
+		}
+		x.line("z.DecFallback(" + addrPfx + varname + ", true)")
+	}
+}
+
+func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type, isptr bool) (done bool) {
+	// This should only be used for exact primitives (i.e. un-named types).
+	// Named types may be implementations of Selfer, Unmarshaler, etc.
+	// They should be handled by dec(...)
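+	// For a plain (un-named) primitive, the emitted assignment is direct, e.g. for int
+	// (a sketch; the actual bitsize constant carries a per-file suffix):
+	//
+	//	x.F = (int)(z.C.IntV(r.DecodeInt64(), codecSelferBitsize1234))
+	//
+	// whereas a named type (e.g. type MyInt int) may implement Selfer or an
+	// Unmarshaler, and so must go through the full dec(...) path instead.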
+ + var ptr string + if isptr { + ptr = "*" + } + switch t.Kind() { + case reflect.Int: + x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs) + case reflect.Int8: + x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 8))", ptr, varname, x.genTypeName(t)) + case reflect.Int16: + x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 16))", ptr, varname, x.genTypeName(t)) + case reflect.Int32: + x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 32))", ptr, varname, x.genTypeName(t)) + case reflect.Int64: + x.linef("%s%s = (%s)(r.DecodeInt64())", ptr, varname, x.genTypeName(t)) + + case reflect.Uint: + x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs) + case reflect.Uint8: + x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 8))", ptr, varname, x.genTypeName(t)) + case reflect.Uint16: + x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 16))", ptr, varname, x.genTypeName(t)) + case reflect.Uint32: + x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 32))", ptr, varname, x.genTypeName(t)) + case reflect.Uint64: + x.linef("%s%s = (%s)(r.DecodeUint64())", ptr, varname, x.genTypeName(t)) + case reflect.Uintptr: + x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs) + + case reflect.Float32: + x.linef("%s%s = (%s)(r.DecodeFloat32As64())", ptr, varname, x.genTypeName(t)) + case reflect.Float64: + x.linef("%s%s = (%s)(r.DecodeFloat64())", ptr, varname, x.genTypeName(t)) + + case reflect.Bool: + x.linef("%s%s = (%s)(r.DecodeBool())", ptr, varname, x.genTypeName(t)) + case reflect.String: + x.linef("%s%s = (%s)(r.DecodeString())", ptr, varname, x.genTypeName(t)) + default: + return false + } + return true +} + +func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) { + if t.AssignableTo(uint8SliceTyp) { + x.line("*" + varname + " = r.DecodeBytes(*((*[]byte)(" + varname + ")), false)") + return + } + if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 { + x.linef("r.DecodeBytes( ((*[%d]byte)(%s))[:], true)", t.Len(), varname) + return + } + type tstruc struct { + TempVar string + Rand string + Varname string + CTyp string + Typ string + Immutable bool + Size int + } + telem := t.Elem() + ts := tstruc{genTempVarPfx, x.varsfx(), varname, x.genTypeName(t), x.genTypeName(telem), genIsImmutable(telem), int(telem.Size())} + + funcs := make(template.FuncMap) + + funcs["decLineVar"] = func(varname string) string { + x.decVar(varname, "", telem, false, true) + return "" + } + funcs["var"] = func(s string) string { + return ts.TempVar + s + ts.Rand + } + funcs["zero"] = func() string { + return x.genZeroValueR(telem) + } + funcs["isArray"] = func() bool { + return t.Kind() == reflect.Array + } + funcs["isSlice"] = func() bool { + return t.Kind() == reflect.Slice + } + funcs["isChan"] = func() bool { + return t.Kind() == reflect.Chan + } + tm, err := template.New("").Funcs(funcs).Parse(genDecListTmpl) + if err != nil { + panic(err) + } + if err = tm.Execute(x.w, &ts); err != nil { + panic(err) + } +} + +func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type) { + type tstruc struct { + TempVar string + Sfx string + Rand string + Varname string + KTyp string + Typ string + Size int + } + telem := t.Elem() + tkey := t.Key() + ts := tstruc{ + genTempVarPfx, x.xs, x.varsfx(), varname, x.genTypeName(tkey), + x.genTypeName(telem), int(telem.Size() + tkey.Size()), + } + + funcs := make(template.FuncMap) 
+ funcs["decElemZero"] = func() string { + return x.genZeroValueR(telem) + } + funcs["decElemKindImmutable"] = func() bool { + return genIsImmutable(telem) + } + funcs["decElemKindPtr"] = func() bool { + return telem.Kind() == reflect.Ptr + } + funcs["decElemKindIntf"] = func() bool { + return telem.Kind() == reflect.Interface + } + funcs["decLineVarK"] = func(varname string) string { + x.decVar(varname, "", tkey, false, true) + return "" + } + funcs["decLineVar"] = func(varname, decodedNilVarname string) string { + x.decVar(varname, decodedNilVarname, telem, false, true) + return "" + } + funcs["var"] = func(s string) string { + return ts.TempVar + s + ts.Rand + } + + tm, err := template.New("").Funcs(funcs).Parse(genDecMapTmpl) + if err != nil { + panic(err) + } + if err = tm.Execute(x.w, &ts); err != nil { + panic(err) + } +} + +func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintptr, t reflect.Type) { + ti := x.ti.get(rtid, t) + tisfi := ti.sfiSrc // always use sequence from file. decStruct expects same thing. + x.line("switch (" + kName + ") {") + var newbuf, nilbuf genBuf + for _, si := range tisfi { + x.line("case \"" + si.encName + "\":") + newbuf.reset() + nilbuf.reset() + t2 := x.decVarInitPtr(varname, "", t, si, &newbuf, &nilbuf) + x.linef("if r.TryDecodeAsNil() { %s } else { %s", nilbuf.buf, newbuf.buf) + x.decVarMain(varname+"."+t2.Name, x.varsfx(), t2.Type, false) + x.line("}") + } + x.line("default:") + // pass the slice here, so that the string will not escape, and maybe save allocation + x.line("z.DecStructFieldNotFound(-1, " + kName + ")") + x.line("} // end switch " + kName) +} + +func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t reflect.Type, style genStructMapStyle) { + tpfx := genTempVarPfx + ti := x.ti.get(rtid, t) + i := x.varsfx() + kName := tpfx + "s" + i + + switch style { + case genStructMapStyleLenPrefix: + x.linef("for %sj%s := 0; %sj%s < %s; %sj%s++ {", tpfx, i, tpfx, i, lenvarname, tpfx, i) + case genStructMapStyleCheckBreak: + x.linef("for %sj%s := 0; !r.CheckBreak(); %sj%s++ {", tpfx, i, tpfx, i) + default: // 0, otherwise. + x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length + x.linef("for %sj%s := 0; ; %sj%s++ {", tpfx, i, tpfx, i) + x.linef("if %shl%s { if %sj%s >= %s { break }", tpfx, i, tpfx, i, lenvarname) + x.line("} else { if r.CheckBreak() { break }; }") + } + x.line("r.ReadMapElemKey()") + + // emulate decstructfieldkey + switch ti.keyType { + case valueTypeInt: + x.linef("%s := z.StringView(strconv.AppendInt(z.DecScratchArrayBuffer()[:0], r.DecodeInt64(), 10))", kName) + case valueTypeUint: + x.linef("%s := z.StringView(strconv.AppendUint(z.DecScratchArrayBuffer()[:0], r.DecodeUint64(), 10))", kName) + case valueTypeFloat: + x.linef("%s := z.StringView(strconv.AppendFloat(z.DecScratchArrayBuffer()[:0], r.DecodeFloat64(), 'f', -1, 64))", kName) + default: // string + x.linef("%s := z.StringView(r.DecodeStringAsBytes())", kName) + } + // x.linef("%s := z.StringView(r.DecStructFieldKey(codecSelferValueType%s%s, z.DecScratchArrayBuffer()))", kName, ti.keyType.String(), x.xs) + + x.line("r.ReadMapElemValue()") + x.decStructMapSwitch(kName, varname, rtid, t) + + x.line("} // end for " + tpfx + "j" + i) + x.line("r.ReadMapEnd()") +} + +func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) { + tpfx := genTempVarPfx + i := x.varsfx() + ti := x.ti.get(rtid, t) + tisfi := ti.sfiSrc // always use sequence from file. 
decStruct expects same thing.
+	x.linef("var %sj%s int", tpfx, i)
+	x.linef("var %sb%s bool", tpfx, i)                        // break
+	x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length
+	var newbuf, nilbuf genBuf
+	for _, si := range tisfi {
+		x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }",
+			tpfx, i, tpfx, i, tpfx, i,
+			tpfx, i, lenvarname, tpfx, i)
+		x.linef("if %sb%s { r.ReadArrayEnd(); %s }", tpfx, i, breakString)
+		x.line("r.ReadArrayElem()")
+		newbuf.reset()
+		nilbuf.reset()
+		t2 := x.decVarInitPtr(varname, "", t, si, &newbuf, &nilbuf)
+		x.linef("if r.TryDecodeAsNil() { %s } else { %s", nilbuf.buf, newbuf.buf)
+		x.decVarMain(varname+"."+t2.Name, x.varsfx(), t2.Type, false)
+		x.line("}")
+	}
+	// read remaining values and throw away.
+	x.line("for {")
+	x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }",
+		tpfx, i, tpfx, i, tpfx, i,
+		tpfx, i, lenvarname, tpfx, i)
+	x.linef("if %sb%s { break }", tpfx, i)
+	x.line("r.ReadArrayElem()")
+	x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i)
+	x.line("}")
+	x.line("r.ReadArrayEnd()")
+}
+
+func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) {
+	// varname MUST be a ptr, or a struct field or a slice element.
+	i := x.varsfx()
+	x.linef("%sct%s := r.ContainerType()", genTempVarPfx, i)
+	x.linef("if %sct%s == codecSelferValueTypeMap%s {", genTempVarPfx, i, x.xs)
+	x.line(genTempVarPfx + "l" + i + " := r.ReadMapStart()")
+	x.linef("if %sl%s == 0 {", genTempVarPfx, i)
+	x.line("r.ReadMapEnd()")
+	if genUseOneFunctionForDecStructMap {
+		x.line("} else { ")
+		x.linef("%s.codecDecodeSelfFromMap(%sl%s, d)", varname, genTempVarPfx, i)
+	} else {
+		x.line("} else if " + genTempVarPfx + "l" + i + " > 0 { ")
+		x.line(varname + ".codecDecodeSelfFromMapLenPrefix(" + genTempVarPfx + "l" + i + ", d)")
+		x.line("} else {")
+		x.line(varname + ".codecDecodeSelfFromMapCheckBreak(" + genTempVarPfx + "l" + i + ", d)")
+	}
+	x.line("}")
+
+	// else if container is array
+	x.linef("} else if %sct%s == codecSelferValueTypeArray%s {", genTempVarPfx, i, x.xs)
+	x.line(genTempVarPfx + "l" + i + " := r.ReadArrayStart()")
+	x.linef("if %sl%s == 0 {", genTempVarPfx, i)
+	x.line("r.ReadArrayEnd()")
+	x.line("} else { ")
+	x.linef("%s.codecDecodeSelfFromArray(%sl%s, d)", varname, genTempVarPfx, i)
+	x.line("}")
+	// else panic
+	x.line("} else { ")
+	x.line("panic(errCodecSelferOnlyMapOrArrayEncodeToStruct" + x.xs + ")")
+	x.line("} ")
+}
+
+// --------
+
+func (x *genRunner) newGenV(t reflect.Type) (v genV) {
+	switch t.Kind() {
+	case reflect.Slice, reflect.Array:
+		te := t.Elem()
+		v.Elem = x.genTypeName(te)
+		v.Size = int(te.Size())
+	case reflect.Map:
+		te, tk := t.Elem(), t.Key()
+		v.Elem = x.genTypeName(te)
+		v.MapKey = x.genTypeName(tk)
+		v.Size = int(te.Size() + tk.Size())
+	default:
+		panic("unexpected type for newGenV. Requires map or slice type")
+	}
+	return
+}
+
+func (x *genV) MethodNamePfx(prefix string, prim bool) string {
+	var name []byte
+	if prefix != "" {
+		name = append(name, prefix...)
+	}
+	if prim {
+		name = append(name, genTitleCaseName(x.Primitive)...)
+	} else {
+		if x.MapKey == "" {
+			name = append(name, "Slice"...)
+		} else {
+			name = append(name, "Map"...)
+			name = append(name, genTitleCaseName(x.MapKey)...)
+		}
+		name = append(name, genTitleCaseName(x.Elem)...)
+	}
+	return string(name)
+
+}
+
+// genImportPath returns the import path of a non-predeclared named type, or an empty string otherwise.
+//
+// This handles the misbehaviour that occurs when 1.5-style vendoring is enabled,
+// where PkgPath returns the full path, including the vendoring prefix that should have been stripped.
+// We strip it here.
+func genImportPath(t reflect.Type) (s string) {
+	s = t.PkgPath()
+	s = genStripVendor(s)
+	return
+}
+
+// A Go identifier is (letter|_)[letter|number|_]*
+func genGoIdentifier(s string, checkFirstChar bool) string {
+	b := make([]byte, 0, len(s))
+	t := make([]byte, 4)
+	var n int
+	for i, r := range s {
+		if checkFirstChar && i == 0 && !unicode.IsLetter(r) {
+			b = append(b, '_')
+		}
+		// r must be unicode_letter, unicode_digit or _
+		if unicode.IsLetter(r) || unicode.IsDigit(r) {
+			n = utf8.EncodeRune(t, r)
+			b = append(b, t[:n]...)
+		} else {
+			b = append(b, '_')
+		}
+	}
+	return string(b)
+}
+
+func genNonPtr(t reflect.Type) reflect.Type {
+	for t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+	return t
+}
+
+func genTitleCaseName(s string) string {
+	switch s {
+	case "interface{}", "interface {}":
+		return "Intf"
+	default:
+		return strings.ToUpper(s[0:1]) + s[1:]
+	}
+}
+
+func genMethodNameT(t reflect.Type, tRef reflect.Type) (n string) {
+	var ptrPfx string
+	for t.Kind() == reflect.Ptr {
+		ptrPfx += "Ptrto"
+		t = t.Elem()
+	}
+	tstr := t.String()
+	if tn := t.Name(); tn != "" {
+		if tRef != nil && genImportPath(t) == genImportPath(tRef) {
+			return ptrPfx + tn
+		} else {
+			if genQNameRegex.MatchString(tstr) {
+				return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+			} else {
+				return ptrPfx + genCustomTypeName(tstr)
+			}
+		}
+	}
+	switch t.Kind() {
+	case reflect.Map:
+		return ptrPfx + "Map" + genMethodNameT(t.Key(), tRef) + genMethodNameT(t.Elem(), tRef)
+	case reflect.Slice:
+		return ptrPfx + "Slice" + genMethodNameT(t.Elem(), tRef)
+	case reflect.Array:
+		return ptrPfx + "Array" + strconv.FormatInt(int64(t.Len()), 10) + genMethodNameT(t.Elem(), tRef)
+	case reflect.Chan:
+		var cx string
+		switch t.ChanDir() {
+		case reflect.SendDir:
+			cx = "ChanSend"
+		case reflect.RecvDir:
+			cx = "ChanRecv"
+		default:
+			cx = "Chan"
+		}
+		return ptrPfx + cx + genMethodNameT(t.Elem(), tRef)
+	default:
+		if t == intfTyp {
+			return ptrPfx + "Interface"
+		} else {
+			if tRef != nil && genImportPath(t) == genImportPath(tRef) {
+				if t.Name() != "" {
+					return ptrPfx + t.Name()
+				} else {
+					return ptrPfx + genCustomTypeName(tstr)
+				}
+			} else {
+				// best way to get the package name inclusive
+				// return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+				// return ptrPfx + genBase32enc.EncodeToString([]byte(tstr))
+				if t.Name() != "" && genQNameRegex.MatchString(tstr) {
+					return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+				} else {
+					return ptrPfx + genCustomTypeName(tstr)
+				}
+			}
+		}
+	}
+}
+
+// genCustomTypeName base32encodes the t.String() value in such a way
+// that it can be used within a function name.
+func genCustomTypeName(tstr string) string {
+	len2 := genBase32enc.EncodedLen(len(tstr))
+	bufx := make([]byte, len2)
+	genBase32enc.Encode(bufx, []byte(tstr))
+	for i := len2 - 1; i >= 0; i-- {
+		if bufx[i] == '=' {
+			len2--
+		} else {
+			break
+		}
+	}
+	return string(bufx[:len2])
+}
+
+func genIsImmutable(t reflect.Type) (v bool) {
+	return isImmutableKind(t.Kind())
+}
+
+func genStripVendor(s string) string {
+	// HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later.
+	// if s contains /vendor/ OR startsWith vendor/, then return everything after it.
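+	// For example:
+	//	"vendor/github.com/user/pkg"                 -> "github.com/user/pkg"
+	//	"github.com/host/repo/vendor/github.com/u/p" -> "github.com/u/p"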
+	const vendorStart = "vendor/"
+	const vendorInline = "/vendor/"
+	if i := strings.LastIndex(s, vendorInline); i >= 0 {
+		s = s[i+len(vendorInline):]
+	} else if strings.HasPrefix(s, vendorStart) {
+		s = s[len(vendorStart):]
+	}
+	return s
+}
diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/helper.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/helper.go
new file mode 100644
index 0000000000000..9c0ed16a93118
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/helper.go
@@ -0,0 +1,2926 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// Contains code shared by both encode and decode.
+
+// Some shared ideas around encoding/decoding
+// ------------------------------------------
+//
+// If an interface{} is passed, we first do a type assertion to see if it is
+// a primitive type or a map/slice of primitive types, and use a fastpath to handle it.
+//
+// If we start with a reflect.Value, we are already in reflect.Value land and
+// will try to grab the function for the underlying Type and directly call that function.
+// This is more performant than calling reflect.Value.Interface().
+//
+// This still helps us bypass many layers of reflection, and give best performance.
+//
+// Containers
+// ------------
+// Containers in the stream are either associative arrays (key-value pairs) or
+// regular arrays (indexed by incrementing integers).
+//
+// Some streams support indefinite-length containers, and use a breaking
+// byte-sequence to denote that the container has come to an end.
+//
+// Some streams also are text-based, and use explicit separators to denote the
+// end/beginning of different values.
+//
+// During encode, we use a high-level condition to determine how to iterate through
+// the container. That decision is based on whether the container is text-based (with
+// separators) or binary (without separators). If binary, we do not even call the
+// encoding of separators.
+//
+// During decode, we use a different high-level condition to determine how to iterate
+// through the containers. That decision is based on whether the stream contained
+// a length prefix, or if it used explicit breaks. If length-prefixed, we assume that
+// it has to be binary, and we do not even try to read separators.
+//
+// Philosophy
+// ------------
+// On decode, this codec will update containers appropriately:
+//    - If struct, update fields from stream into fields of struct.
+//      If a field in the stream is not found in the struct, it is handled appropriately (based on option).
+//      If a struct field has no corresponding value in the stream, leave it AS IS.
+//      If nil in stream, set value to nil/zero value.
+//    - If map, update map from stream.
+//      If the stream value is NIL, set the map to nil.
+//    - If slice, try to update up to the length of the array in the stream.
+//      If the container len is less than the stream array length,
+//      and the container cannot be expanded, it is handled (based on option).
+//      This means you can decode a 4-element stream array into a 1-element array.
+//
+// ------------------------------------
+// On encode, user can specify omitEmpty. This means that the value will be omitted
+// if it is the zero value. The problem may occur during decode, where omitted values do not affect
+// the value being decoded into. This means that if decoding into a struct with an
+// int field with current value=5, and the field is omitted in the stream, then after
+// decoding, the value will still be 5 (not 0).
+// omitEmpty only works if you guarantee that you always decode into zero-values.
+//
+// ------------------------------------
+// We could have truncated a map to remove keys not available in the stream,
+// or set values in the struct which are not in the stream to their zero values.
+// We decided against it because there is no efficient way to do it.
+// We may introduce it as an option later.
+// However, that will require enabling it for both runtime and code generation modes.
+//
+// To support truncate, we need to do 2 passes over the container:
+//   map
+//   - first collect all keys (e.g. in k1)
+//   - for each key in stream, mark k1 that the key should not be removed
+//   - after updating map, do second pass and call delete for all keys in k1 which are not marked
+//   struct:
+//   - for each field, track the *typeInfo s1
+//   - iterate through all s1, and for each one not marked, set value to zero
+//   - this involves checking the possible anonymous fields which are nil ptrs.
+//     too much work.
+//
+// ------------------------------------------
+// Error Handling is done within the library using panic.
+//
+// This way, the code doesn't have to keep checking if an error has happened,
+// and we don't have to keep sending the error value along with each call
+// or storing it in the En|Decoder and checking it constantly along the way.
+//
+// The disadvantage is that small functions which use panics cannot be inlined.
+// The code accounts for that by only using panics behind an interface;
+// since interface calls cannot be inlined, this is irrelevant.
+//
+// We considered storing the error in the En|Decoder.
+//   - once it has its err field set, it cannot be used again.
+//   - panicking will be optional, controlled by const flag.
+//   - code should always check error first and return early.
+// We eventually decided against it as it makes the code clumsier to always
+// check for these error conditions.
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+const (
+	scratchByteArrayLen = 32
+	// initCollectionCap = 16 // 32 is defensive. 16 is preferred.
+
+	// Support encoding.(Binary|Text)(Unm|M)arshaler.
+	// This constant flag will enable or disable it.
+	supportMarshalInterfaces = true
+
+	// for debugging, set this to false, to catch panic traces.
+	// Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic.
+	recoverPanicToErr = true
+
+	// arrayCacheLen is the length of the cache used in encoder or decoder for
+	// allowing zero-alloc initialization.
+	// arrayCacheLen = 8
+
+	// size of the cacheline: defaulting to value for archs: amd64, arm64, 386
+	// should use "runtime/internal/sys".CacheLineSize, but that is not exposed.
+	cacheLineSize = 64
+
+	wordSizeBits = 32 << (^uint(0) >> 63) // strconv.IntSize
+	wordSize     = wordSizeBits / 8
+
+	// so structFieldInfo fits into 8 bytes
+	maxLevelsEmbedding = 14
+
+	// useFinalizers=true configures finalizers to release pooled resources
+	// acquired by Encoder/Decoder during their GC.
+	//
+	// Note that calling SetFinalizer is always expensive,
+	// as code must be run on the systemstack even for SetFinalizer(t, nil).
+	//
+	// We document that folks SHOULD call Release() when done, or they can
+	// explicitly call SetFinalizer themselves e.g.
+	//    runtime.SetFinalizer(e, (*Encoder).Release)
+	//    runtime.SetFinalizer(d, (*Decoder).Release)
+	useFinalizers = false
+)
+
+var oneByteArr [1]byte
+var zeroByteSlice = oneByteArr[:0:0]
+
+var codecgen bool
+
+var refBitset bitset256
+var pool pooler
+var panicv panicHdl
+
+func init() {
+	pool.init()
+
+	refBitset.set(byte(reflect.Map))
+	refBitset.set(byte(reflect.Ptr))
+	refBitset.set(byte(reflect.Func))
+	refBitset.set(byte(reflect.Chan))
+}
+
+type clsErr struct {
+	closed    bool  // is it closed?
+	errClosed error // error on closing
+}
+
+// type entryType uint8
+
+// const (
+// 	entryTypeBytes entryType = iota // make this 0, so a comparison is cheap
+// 	entryTypeIo
+// 	entryTypeBufio
+// 	entryTypeUnset = 255
+// )
+
+type charEncoding uint8
+
+const (
+	_ charEncoding = iota // make 0 unset
+	cUTF8
+	cUTF16LE
+	cUTF16BE
+	cUTF32LE
+	cUTF32BE
+	// Deprecated: not a true char encoding value
+	cRAW charEncoding = 255
+)
+
+// valueType is the stream type
+type valueType uint8
+
+const (
+	valueTypeUnset valueType = iota
+	valueTypeNil
+	valueTypeInt
+	valueTypeUint
+	valueTypeFloat
+	valueTypeBool
+	valueTypeString
+	valueTypeSymbol
+	valueTypeBytes
+	valueTypeMap
+	valueTypeArray
+	valueTypeTime
+	valueTypeExt
+
+	// valueTypeInvalid = 0xff
+)
+
+var valueTypeStrings = [...]string{
+	"Unset",
+	"Nil",
+	"Int",
+	"Uint",
+	"Float",
+	"Bool",
+	"String",
+	"Symbol",
+	"Bytes",
+	"Map",
+	"Array",
+	"Timestamp",
+	"Ext",
+}
+
+func (x valueType) String() string {
+	if int(x) < len(valueTypeStrings) {
+		return valueTypeStrings[x]
+	}
+	return strconv.FormatInt(int64(x), 10)
+}
+
+type seqType uint8
+
+const (
+	_ seqType = iota
+	seqTypeArray
+	seqTypeSlice
+	seqTypeChan
+)
+
+// note that containerMapStart and containerArrayStart are not sent.
+// This is because the ReadXXXStart and EncodeXXXStart already do these.
+type containerState uint8
+
+const (
+	_ containerState = iota
+
+	containerMapStart // slot left open, since Driver method already covers it
+	containerMapKey
+	containerMapValue
+	containerMapEnd
+	containerArrayStart // slot left open, since Driver methods already cover it
+	containerArrayElem
+	containerArrayEnd
+)
+
+// // sfiIdx used for tracking where a (field/enc)Name is seen in a []*structFieldInfo
+// type sfiIdx struct {
+// 	name  string
+// 	index int
+// }
+
+// do not recurse if a containing type refers to an embedded type
+// which refers back to its containing type (via a pointer).
+// The second time this back-reference happens, break out,
+// so as not to cause an infinite loop.
+const rgetMaxRecursion = 2
+
+// Anecdotally, we believe most types have <= 12 fields.
+// - even Java's PMD rules set TooManyFields threshold to 15.
+// However, Go has embedded fields, which should be regarded as
+// top level, allowing structs to possibly double or triple.
+// In addition, we don't want to keep creating transient arrays,
+// especially for the sfi index tracking, and the evtypes tracking.
+// +// So - try to keep typeInfoLoadArray within 2K bytes +const ( + typeInfoLoadArraySfisLen = 16 + typeInfoLoadArraySfiidxLen = 8 * 112 + typeInfoLoadArrayEtypesLen = 12 + typeInfoLoadArrayBLen = 8 * 4 +) + +type typeInfoLoad struct { + // fNames []string + // encNames []string + etypes []uintptr + sfis []structFieldInfo +} + +type typeInfoLoadArray struct { + // fNames [typeInfoLoadArrayLen]string + // encNames [typeInfoLoadArrayLen]string + sfis [typeInfoLoadArraySfisLen]structFieldInfo + sfiidx [typeInfoLoadArraySfiidxLen]byte + etypes [typeInfoLoadArrayEtypesLen]uintptr + b [typeInfoLoadArrayBLen]byte // scratch - used for struct field names +} + +// mirror json.Marshaler and json.Unmarshaler here, +// so we don't import the encoding/json package + +type jsonMarshaler interface { + MarshalJSON() ([]byte, error) +} +type jsonUnmarshaler interface { + UnmarshalJSON([]byte) error +} + +type isZeroer interface { + IsZero() bool +} + +type codecError struct { + name string + err interface{} +} + +func (e codecError) Cause() error { + switch xerr := e.err.(type) { + case nil: + return nil + case error: + return xerr + case string: + return errors.New(xerr) + case fmt.Stringer: + return errors.New(xerr.String()) + default: + return fmt.Errorf("%v", e.err) + } +} + +func (e codecError) Error() string { + return fmt.Sprintf("%s error: %v", e.name, e.err) +} + +// type byteAccepter func(byte) bool + +var ( + bigen = binary.BigEndian + structInfoFieldName = "_struct" + + mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) + mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) + intfSliceTyp = reflect.TypeOf([]interface{}(nil)) + intfTyp = intfSliceTyp.Elem() + + reflectValTyp = reflect.TypeOf((*reflect.Value)(nil)).Elem() + + stringTyp = reflect.TypeOf("") + timeTyp = reflect.TypeOf(time.Time{}) + rawExtTyp = reflect.TypeOf(RawExt{}) + rawTyp = reflect.TypeOf(Raw{}) + uintptrTyp = reflect.TypeOf(uintptr(0)) + uint8Typ = reflect.TypeOf(uint8(0)) + uint8SliceTyp = reflect.TypeOf([]uint8(nil)) + uintTyp = reflect.TypeOf(uint(0)) + intTyp = reflect.TypeOf(int(0)) + + mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem() + + binaryMarshalerTyp = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() + binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() + + textMarshalerTyp = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + + jsonMarshalerTyp = reflect.TypeOf((*jsonMarshaler)(nil)).Elem() + jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem() + + selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem() + missingFielderTyp = reflect.TypeOf((*MissingFielder)(nil)).Elem() + iszeroTyp = reflect.TypeOf((*isZeroer)(nil)).Elem() + + uint8TypId = rt2id(uint8Typ) + uint8SliceTypId = rt2id(uint8SliceTyp) + rawExtTypId = rt2id(rawExtTyp) + rawTypId = rt2id(rawTyp) + intfTypId = rt2id(intfTyp) + timeTypId = rt2id(timeTyp) + stringTypId = rt2id(stringTyp) + + mapStrIntfTypId = rt2id(mapStrIntfTyp) + mapIntfIntfTypId = rt2id(mapIntfIntfTyp) + intfSliceTypId = rt2id(intfSliceTyp) + // mapBySliceTypId = rt2id(mapBySliceTyp) + + intBitsize = uint8(intTyp.Bits()) + uintBitsize = uint8(uintTyp.Bits()) + + // bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} + bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} + + chkOvf checkOverflow + + errNoFieldNameToStructFieldInfo = errors.New("no field name passed to parseStructFieldInfo") +) + +var defTypeInfos = 
NewTypeInfos([]string{"codec", "json"})
+
+var immutableKindsSet = [32]bool{
+	// reflect.Invalid: ,
+	reflect.Bool:       true,
+	reflect.Int:        true,
+	reflect.Int8:       true,
+	reflect.Int16:      true,
+	reflect.Int32:      true,
+	reflect.Int64:      true,
+	reflect.Uint:       true,
+	reflect.Uint8:      true,
+	reflect.Uint16:     true,
+	reflect.Uint32:     true,
+	reflect.Uint64:     true,
+	reflect.Uintptr:    true,
+	reflect.Float32:    true,
+	reflect.Float64:    true,
+	reflect.Complex64:  true,
+	reflect.Complex128: true,
+	// reflect.Array
+	// reflect.Chan
+	// reflect.Func: true,
+	// reflect.Interface
+	// reflect.Map
+	// reflect.Ptr
+	// reflect.Slice
+	reflect.String: true,
+	reflect.Struct: true,
+	// reflect.UnsafePointer
+}
+
+// Selfer defines methods by which a value can encode or decode itself.
+//
+// Any type which implements Selfer will be able to encode or decode itself.
+// Consequently, during (en|de)code, this takes precedence over
+// (text|binary)(M|Unm)arshal or extension support.
+//
+// By definition, it is not allowed for a Selfer to directly call Encode or Decode on itself.
+// If that is done, Encode/Decode will rightfully fail with a Stack Overflow style error.
+// For example, the snippet below will cause such an error.
+//
+//	type testSelferRecur struct{}
+//	func (s *testSelferRecur) CodecEncodeSelf(e *Encoder) { e.MustEncode(s) }
+//	func (s *testSelferRecur) CodecDecodeSelf(d *Decoder) { d.MustDecode(s) }
+//
+// Note: *the first set of bytes of any value MUST NOT represent nil in the format*.
+// This is because, during each decode, we first check if the next set of bytes
+// represent nil, and if so, we just set the value to nil.
+type Selfer interface {
+	CodecEncodeSelf(*Encoder)
+	CodecDecodeSelf(*Decoder)
+}
+
+// MissingFielder defines the interface allowing structs to internally decode or encode
+// values which do not map to struct fields.
+//
+// We expect that this interface is bound to a pointer type (so the mutation function works).
+//
+// A use-case is if a version of a type unexports a field, but you want compatibility between
+// both versions during encoding and decoding.
+//
+// Note that the interface is completely ignored during codecgen.
+type MissingFielder interface {
+	// CodecMissingField is called to set a missing field and value pair.
+	//
+	// It returns true if the missing field was set on the struct.
+	CodecMissingField(field []byte, value interface{}) bool
+
+	// CodecMissingFields returns the set of fields which are not struct fields
+	CodecMissingFields() map[string]interface{}
+}
+
+// MapBySlice is a tag interface that denotes a wrapped slice should encode as a map in the stream.
+// The slice contains a sequence of key-value pairs.
+// This affords storing a map in a specific sequence in the stream.
+//
+// Example usage:
+//
+//	type T1 []string // or []int or []Point or any other "slice" type
+//	func (_ T1) MapBySlice() {} // T1 now implements MapBySlice, and will be encoded as a map
+//	type T2 struct { KeyValues T1 }
+//
+//	var kvs = []string{"one", "1", "two", "2", "three", "3"}
+//	var v2 = T2{ KeyValues: T1(kvs) }
+//	// v2 will be encoded like the map: {"KeyValues": {"one": "1", "two": "2", "three": "3"} }
+//
+// The support of MapBySlice affords the following:
+//   - A slice type which implements MapBySlice will be encoded as a map
+//   - A slice can be decoded from a map in the stream
+//   - It MUST be a slice type (not a pointer receiver) that implements MapBySlice
+type MapBySlice interface {
+	MapBySlice()
+}
+
+// BasicHandle encapsulates the common options and extension functions.
+//
+// Deprecated: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED.
+type BasicHandle struct {
+	// BasicHandle is always a part of a different type.
+	// It doesn't have to fit into its own cache line.
+
+	// TypeInfos is used to get the type info for any type.
+	//
+	// If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json
+	TypeInfos *TypeInfos
+
+	// Note: BasicHandle is not comparable, due to these slices here (extHandle, intf2impls).
+	// If *[]T is used instead, this becomes comparable, at the cost of extra indirection.
+	// These slices are used all the time, so keep as slices (not pointers).
+
+	extHandle
+
+	intf2impls
+
+	inited uint32
+	_      uint32 // padding
+
+	// ---- cache line
+
+	RPCOptions
+
+	// TimeNotBuiltin configures whether time.Time should be treated as a builtin type.
+	//
+	// All Handlers should know how to encode/decode time.Time as part of the core
+	// format specification, or as a standard extension defined by the format.
+	//
+	// However, users can elect to handle time.Time as a custom extension, or via the
+	// standard library's encoding.Binary(M|Unm)arshaler or Text(M|Unm)arshaler interface.
+	// To elect this behavior, users can set TimeNotBuiltin=true.
+	// Note: Setting TimeNotBuiltin=true can be used to enable the legacy behavior
+	// (for Cbor and Msgpack), where time.Time was not a builtin supported type.
+	TimeNotBuiltin bool
+
+	// ExplicitRelease configures whether Release() is implicitly called after an encode or
+	// decode call.
+	//
+	// If you will hold onto an Encoder or Decoder for re-use, by calling Reset(...)
+	// on it or calling (Must)Encode repeatedly into a given []byte or io.Writer,
+	// then you do not want it to be implicitly closed after each Encode/Decode call.
+	// Doing so will unnecessarily return resources to the shared pool, only for you to
+	// grab them right after again to do another Encode/Decode call.
+	//
+	// Instead, you configure ExplicitRelease=true, and you explicitly call Release() when
+	// you are truly done.
+	//
+	// As an alternative, you can explicitly set a finalizer - so its resources
+	// are returned to the shared pool before it is garbage-collected. Do it as below:
+	//    runtime.SetFinalizer(e, (*Encoder).Release)
+	//    runtime.SetFinalizer(d, (*Decoder).Release)
+	ExplicitRelease bool
+
+	be bool // is the handle a binary encoding?
+	js bool // is the handle a javascript (json) handler?
+ n byte // first letter of handle name + _ uint16 // padding + + // ---- cache line + + DecodeOptions + + // ---- cache line + + EncodeOptions + + // noBuiltInTypeChecker + + rtidFns atomicRtidFnSlice + mu sync.Mutex + // r []uintptr // rtids mapped to s above +} + +// basicHandle returns an initialized BasicHandle from the Handle. +func basicHandle(hh Handle) (x *BasicHandle) { + x = hh.getBasicHandle() + // ** We need to simulate once.Do, to ensure no data race within the block. + // ** Consequently, below would not work. + // if atomic.CompareAndSwapUint32(&x.inited, 0, 1) { + // x.be = hh.isBinary() + // _, x.js = hh.(*JsonHandle) + // x.n = hh.Name()[0] + // } + + // simulate once.Do using our own stored flag and mutex as a CompareAndSwap + // is not sufficient, since a race condition can occur within init(Handle) function. + // init is made noinline, so that this function can be inlined by its caller. + if atomic.LoadUint32(&x.inited) == 0 { + x.init(hh) + } + return +} + +//go:noinline +func (x *BasicHandle) init(hh Handle) { + // make it uninlineable, as it is called at most once + x.mu.Lock() + if x.inited == 0 { + x.be = hh.isBinary() + _, x.js = hh.(*JsonHandle) + x.n = hh.Name()[0] + atomic.StoreUint32(&x.inited, 1) + } + x.mu.Unlock() +} + +func (x *BasicHandle) getBasicHandle() *BasicHandle { + return x +} + +func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { + if x.TypeInfos == nil { + return defTypeInfos.get(rtid, rt) + } + return x.TypeInfos.get(rtid, rt) +} + +func findFn(s []codecRtidFn, rtid uintptr) (i uint, fn *codecFn) { + // binary search. adapted from sort/search.go. + // Note: we use goto (instead of for loop) so this can be inlined. + + // h, i, j := 0, 0, len(s) + var h uint // var h, i uint + var j = uint(len(s)) +LOOP: + if i < j { + h = i + (j-i)/2 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(s)) && s[i].rtid == rtid { + fn = s[i].fn + } + return +} + +func (x *BasicHandle) fn(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *codecFn) { + rtid := rt2id(rt) + sp := x.rtidFns.load() + if sp != nil { + if _, fn = findFn(sp, rtid); fn != nil { + // xdebugf("<<<< %c: found fn for %v in rtidfns of size: %v", c.n, rt, len(sp)) + return + } + } + c := x + // xdebugf("#### for %c: load fn for %v in rtidfns of size: %v", c.n, rt, len(sp)) + fn = new(codecFn) + fi := &(fn.i) + ti := c.getTypeInfo(rtid, rt) + fi.ti = ti + + rk := reflect.Kind(ti.kind) + + if checkCodecSelfer && (ti.cs || ti.csp) { + fn.fe = (*Encoder).selferMarshal + fn.fd = (*Decoder).selferUnmarshal + fi.addrF = true + fi.addrD = ti.csp + fi.addrE = ti.csp + } else if rtid == timeTypId && !c.TimeNotBuiltin { + fn.fe = (*Encoder).kTime + fn.fd = (*Decoder).kTime + } else if rtid == rawTypId { + fn.fe = (*Encoder).raw + fn.fd = (*Decoder).raw + } else if rtid == rawExtTypId { + fn.fe = (*Encoder).rawExt + fn.fd = (*Decoder).rawExt + fi.addrF = true + fi.addrD = true + fi.addrE = true + } else if xfFn := c.getExt(rtid); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fe = (*Encoder).ext + fn.fd = (*Decoder).ext + fi.addrF = true + fi.addrD = true + if rk == reflect.Struct || rk == reflect.Array { + fi.addrE = true + } + } else if supportMarshalInterfaces && c.be && (ti.bm || ti.bmp) && (ti.bu || ti.bup) { + fn.fe = (*Encoder).binaryMarshal + fn.fd = (*Decoder).binaryUnmarshal + fi.addrF = true + fi.addrD = ti.bup + fi.addrE = ti.bmp + } else if supportMarshalInterfaces && !c.be && c.js && (ti.jm || 
ti.jmp) && (ti.ju || ti.jup) { + //If JSON, we should check JSONMarshal before textMarshal + fn.fe = (*Encoder).jsonMarshal + fn.fd = (*Decoder).jsonUnmarshal + fi.addrF = true + fi.addrD = ti.jup + fi.addrE = ti.jmp + } else if supportMarshalInterfaces && !c.be && (ti.tm || ti.tmp) && (ti.tu || ti.tup) { + fn.fe = (*Encoder).textMarshal + fn.fd = (*Decoder).textUnmarshal + fi.addrF = true + fi.addrD = ti.tup + fi.addrE = ti.tmp + } else { + if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) { + if ti.pkgpath == "" { // un-named slice or map + if idx := fastpathAV.index(rtid); idx != -1 { + fn.fe = fastpathAV[idx].encfn + fn.fd = fastpathAV[idx].decfn + fi.addrD = true + fi.addrF = false + } + } else { + // use mapping for underlying type if there + var rtu reflect.Type + if rk == reflect.Map { + rtu = reflect.MapOf(ti.key, ti.elem) + } else { + rtu = reflect.SliceOf(ti.elem) + } + rtuid := rt2id(rtu) + if idx := fastpathAV.index(rtuid); idx != -1 { + xfnf := fastpathAV[idx].encfn + xrt := fastpathAV[idx].rt + fn.fe = func(e *Encoder, xf *codecFnInfo, xrv reflect.Value) { + xfnf(e, xf, xrv.Convert(xrt)) + } + fi.addrD = true + fi.addrF = false // meaning it can be an address(ptr) or a value + xfnf2 := fastpathAV[idx].decfn + fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) { + if xrv.Kind() == reflect.Ptr { + xfnf2(d, xf, xrv.Convert(reflect.PtrTo(xrt))) + } else { + xfnf2(d, xf, xrv.Convert(xrt)) + } + } + } + } + } + if fn.fe == nil && fn.fd == nil { + switch rk { + case reflect.Bool: + fn.fe = (*Encoder).kBool + fn.fd = (*Decoder).kBool + case reflect.String: + fn.fe = (*Encoder).kString + fn.fd = (*Decoder).kString + case reflect.Int: + fn.fd = (*Decoder).kInt + fn.fe = (*Encoder).kInt + case reflect.Int8: + fn.fe = (*Encoder).kInt8 + fn.fd = (*Decoder).kInt8 + case reflect.Int16: + fn.fe = (*Encoder).kInt16 + fn.fd = (*Decoder).kInt16 + case reflect.Int32: + fn.fe = (*Encoder).kInt32 + fn.fd = (*Decoder).kInt32 + case reflect.Int64: + fn.fe = (*Encoder).kInt64 + fn.fd = (*Decoder).kInt64 + case reflect.Uint: + fn.fd = (*Decoder).kUint + fn.fe = (*Encoder).kUint + case reflect.Uint8: + fn.fe = (*Encoder).kUint8 + fn.fd = (*Decoder).kUint8 + case reflect.Uint16: + fn.fe = (*Encoder).kUint16 + fn.fd = (*Decoder).kUint16 + case reflect.Uint32: + fn.fe = (*Encoder).kUint32 + fn.fd = (*Decoder).kUint32 + case reflect.Uint64: + fn.fe = (*Encoder).kUint64 + fn.fd = (*Decoder).kUint64 + case reflect.Uintptr: + fn.fe = (*Encoder).kUintptr + fn.fd = (*Decoder).kUintptr + case reflect.Float32: + fn.fe = (*Encoder).kFloat32 + fn.fd = (*Decoder).kFloat32 + case reflect.Float64: + fn.fe = (*Encoder).kFloat64 + fn.fd = (*Decoder).kFloat64 + case reflect.Invalid: + fn.fe = (*Encoder).kInvalid + fn.fd = (*Decoder).kErr + case reflect.Chan: + fi.seq = seqTypeChan + fn.fe = (*Encoder).kSlice + fn.fd = (*Decoder).kSlice + case reflect.Slice: + fi.seq = seqTypeSlice + fn.fe = (*Encoder).kSlice + fn.fd = (*Decoder).kSlice + case reflect.Array: + fi.seq = seqTypeArray + fn.fe = (*Encoder).kSlice + fi.addrF = false + fi.addrD = false + rt2 := reflect.SliceOf(ti.elem) + fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) { + d.h.fn(rt2, true, false).fd(d, xf, xrv.Slice(0, xrv.Len())) + } + // fn.fd = (*Decoder).kArray + case reflect.Struct: + if ti.anyOmitEmpty || ti.mf || ti.mfp { + fn.fe = (*Encoder).kStruct + } else { + fn.fe = (*Encoder).kStructNoOmitempty + } + fn.fd = (*Decoder).kStruct + case reflect.Map: + fn.fe = (*Encoder).kMap + fn.fd = 
(*Decoder).kMap + case reflect.Interface: + // encode: reflect.Interface are handled already by preEncodeValue + fn.fd = (*Decoder).kInterface + fn.fe = (*Encoder).kErr + default: + // reflect.Ptr and reflect.Interface are handled already by preEncodeValue + fn.fe = (*Encoder).kErr + fn.fd = (*Decoder).kErr + } + } + } + + c.mu.Lock() + var sp2 []codecRtidFn + sp = c.rtidFns.load() + if sp == nil { + sp2 = []codecRtidFn{{rtid, fn}} + c.rtidFns.store(sp2) + // xdebugf(">>>> adding rt: %v to rtidfns of size: %v", rt, len(sp2)) + // xdebugf(">>>> loading stored rtidfns of size: %v", len(c.rtidFns.load())) + } else { + idx, fn2 := findFn(sp, rtid) + if fn2 == nil { + sp2 = make([]codecRtidFn, len(sp)+1) + copy(sp2, sp[:idx]) + copy(sp2[idx+1:], sp[idx:]) + sp2[idx] = codecRtidFn{rtid, fn} + c.rtidFns.store(sp2) + // xdebugf(">>>> adding rt: %v to rtidfns of size: %v", rt, len(sp2)) + + } + } + c.mu.Unlock() + return +} + +// Handle defines a specific encoding format. It also stores any runtime state +// used during an Encoding or Decoding session e.g. stored state about Types, etc. +// +// Once a handle is configured, it can be shared across multiple Encoders and Decoders. +// +// Note that a Handle is NOT safe for concurrent modification. +// Consequently, do not modify it after it is configured if shared among +// multiple Encoders and Decoders in different goroutines. +// +// Consequently, the typical usage model is that a Handle is pre-configured +// before first time use, and not modified while in use. +// Such a pre-configured Handle is safe for concurrent access. +type Handle interface { + Name() string + // return the basic handle. It may not have been inited. + // Prefer to use basicHandle() helper function that ensures it has been inited. + getBasicHandle() *BasicHandle + recreateEncDriver(encDriver) bool + newEncDriver(w *Encoder) encDriver + newDecDriver(r *Decoder) decDriver + isBinary() bool + hasElemSeparators() bool + // IsBuiltinType(rtid uintptr) bool +} + +// Raw represents raw formatted bytes. +// We "blindly" store it during encode and retrieve the raw bytes during decode. +// Note: it is dangerous during encode, so we may gate the behaviour +// behind an Encode flag which must be explicitly set. +type Raw []byte + +// RawExt represents raw unprocessed extension data. +// Some codecs will decode extension data as a *RawExt +// if there is no registered extension for the tag. +// +// Only one of Data or Value is nil. +// If Data is nil, then the content of the RawExt is in the Value. +type RawExt struct { + Tag uint64 + // Data is the []byte which represents the raw ext. If nil, ext is exposed in Value. + // Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of types + Data []byte + // Value represents the extension, if Data is nil. + // Value is used by codecs (e.g. cbor, json) which leverage the format to do + // custom serialization of the types. + Value interface{} +} + +// BytesExt handles custom (de)serialization of types to/from []byte. +// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types. +type BytesExt interface { + // WriteExt converts a value to a []byte. + // + // Note: v is a pointer iff the registered extension type is a struct or array kind. + WriteExt(v interface{}) []byte + + // ReadExt updates a value from a []byte. + // + // Note: dst is always a pointer kind to the registered extension type. 
+ ReadExt(dst interface{}, src []byte) +} + +// InterfaceExt handles custom (de)serialization of types to/from another interface{} value. +// The Encoder or Decoder will then handle the further (de)serialization of that known type. +// +// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of types. +type InterfaceExt interface { + // ConvertExt converts a value into a simpler interface for easy encoding + // e.g. convert time.Time to int64. + // + // Note: v is a pointer iff the registered extension type is a struct or array kind. + ConvertExt(v interface{}) interface{} + + // UpdateExt updates a value from a simpler interface for easy decoding + // e.g. convert int64 to time.Time. + // + // Note: dst is always a pointer kind to the registered extension type. + UpdateExt(dst interface{}, src interface{}) +} + +// Ext handles custom (de)serialization of custom types / extensions. +type Ext interface { + BytesExt + InterfaceExt +} + +// addExtWrapper is a wrapper implementation to support former AddExt exported method. +type addExtWrapper struct { + encFn func(reflect.Value) ([]byte, error) + decFn func(reflect.Value, []byte) error +} + +func (x addExtWrapper) WriteExt(v interface{}) []byte { + bs, err := x.encFn(reflect.ValueOf(v)) + if err != nil { + panic(err) + } + return bs +} + +func (x addExtWrapper) ReadExt(v interface{}, bs []byte) { + if err := x.decFn(reflect.ValueOf(v), bs); err != nil { + panic(err) + } +} + +func (x addExtWrapper) ConvertExt(v interface{}) interface{} { + return x.WriteExt(v) +} + +func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) { + x.ReadExt(dest, v.([]byte)) +} + +type extWrapper struct { + BytesExt + InterfaceExt +} + +type bytesExtFailer struct{} + +func (bytesExtFailer) WriteExt(v interface{}) []byte { + panicv.errorstr("BytesExt.WriteExt is not supported") + return nil +} +func (bytesExtFailer) ReadExt(v interface{}, bs []byte) { + panicv.errorstr("BytesExt.ReadExt is not supported") +} + +type interfaceExtFailer struct{} + +func (interfaceExtFailer) ConvertExt(v interface{}) interface{} { + panicv.errorstr("InterfaceExt.ConvertExt is not supported") + return nil +} +func (interfaceExtFailer) UpdateExt(dest interface{}, v interface{}) { + panicv.errorstr("InterfaceExt.UpdateExt is not supported") +} + +type binaryEncodingType struct{} + +func (binaryEncodingType) isBinary() bool { return true } + +type textEncodingType struct{} + +func (textEncodingType) isBinary() bool { return false } + +// noBuiltInTypes is embedded into many types which do not support builtins +// e.g. msgpack, simple, cbor. + +// type noBuiltInTypeChecker struct{} +// func (noBuiltInTypeChecker) IsBuiltinType(rt uintptr) bool { return false } +// type noBuiltInTypes struct{ noBuiltInTypeChecker } + +type noBuiltInTypes struct{} + +func (noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {} +func (noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {} + +// type noStreamingCodec struct{} +// func (noStreamingCodec) CheckBreak() bool { return false } +// func (noStreamingCodec) hasElemSeparators() bool { return false } + +type noElemSeparators struct{} + +func (noElemSeparators) hasElemSeparators() (v bool) { return } +func (noElemSeparators) recreateEncDriver(e encDriver) (v bool) { return } + +// bigenHelper. +// Users must already slice the x completely, because we will not reslice. +type bigenHelper struct { + x []byte // must be correctly sliced to appropriate len. slicing is a cost. 
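The ConvertExt/UpdateExt pair above is the whole InterfaceExt contract; implementations are registered through the handle's SetInterfaceExt, per the Deprecated notes further down. A hypothetical sketch for a struct-kind type (so v and dst arrive as pointers, per the notes above); the concrete type of src is format-dependent and assumed int64 here:

package example

import "time"

type stamp struct{ t time.Time }

type stampExt struct{}

// ConvertExt: v is a *stamp (struct kind, so a pointer is passed in).
func (stampExt) ConvertExt(v interface{}) interface{} {
	return v.(*stamp).t.UnixNano() // reduce to something any format can encode
}

// UpdateExt: dst is always a *stamp; src is whatever the format decoded
// (assumed int64 here; real code should switch on the possible types).
func (stampExt) UpdateExt(dst interface{}, src interface{}) {
	dst.(*stamp).t = time.Unix(0, src.(int64))
}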
+ w *encWriterSwitch +} + +func (z bigenHelper) writeUint16(v uint16) { + bigen.PutUint16(z.x, v) + z.w.writeb(z.x) +} + +func (z bigenHelper) writeUint32(v uint32) { + bigen.PutUint32(z.x, v) + z.w.writeb(z.x) +} + +func (z bigenHelper) writeUint64(v uint64) { + bigen.PutUint64(z.x, v) + z.w.writeb(z.x) +} + +type extTypeTagFn struct { + rtid uintptr + rtidptr uintptr + rt reflect.Type + tag uint64 + ext Ext + _padding [1]uint64 // padding +} + +type extHandle []extTypeTagFn + +// AddExt registers an encode and decode function for a reflect.Type. +// To deregister an Ext, call AddExt with nil encfn and/or nil decfn. +// +// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead. +func (o *extHandle) AddExt(rt reflect.Type, tag byte, + encfn func(reflect.Value) ([]byte, error), + decfn func(reflect.Value, []byte) error) (err error) { + if encfn == nil || decfn == nil { + return o.SetExt(rt, uint64(tag), nil) + } + return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn}) +} + +// SetExt will set the extension for a tag and reflect.Type. +// Note that the type must be a named type, and specifically not a pointer or interface. +// An error is returned if that is not honored. +// To deregister an ext, call SetExt with a nil Ext. +// +// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead. +func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) { + // o is a pointer, because we may need to initialize it + rk := rt.Kind() + for rk == reflect.Ptr { + rt = rt.Elem() + rk = rt.Kind() + } + + if rt.PkgPath() == "" || rk == reflect.Interface { // || rk == reflect.Ptr { + return fmt.Errorf("codec.Handle.SetExt: Takes named type, not a pointer or interface: %v", rt) + } + + rtid := rt2id(rt) + switch rtid { + case timeTypId, rawTypId, rawExtTypId: + // all natively supported types, so they cannot have an extension + return // TODO: should we silently ignore, or return an error??? + } + o2 := *o + for i := range o2 { + v := &o2[i] + if v.rtid == rtid { + v.tag, v.ext = tag, ext + return + } + } + rtidptr := rt2id(reflect.PtrTo(rt)) + *o = append(o2, extTypeTagFn{rtid, rtidptr, rt, tag, ext, [1]uint64{}}) + return +} + +func (o extHandle) getExt(rtid uintptr) (v *extTypeTagFn) { + for i := range o { + v = &o[i] + if v.rtid == rtid || v.rtidptr == rtid { + return + } + } + return nil +} + +func (o extHandle) getExtForTag(tag uint64) (v *extTypeTagFn) { + for i := range o { + v = &o[i] + if v.tag == tag { + return + } + } + return nil +} + +type intf2impl struct { + rtid uintptr // for intf + impl reflect.Type + // _ [1]uint64 // padding // not-needed, as *intf2impl is never returned. +} + +type intf2impls []intf2impl + +// Intf2Impl maps an interface to an implementing type. +// This allows us to support inferring the concrete type +// and populating it when passed an interface. +// e.g. var v io.Reader can be decoded as a bytes.Buffer, etc. +// +// Passing a nil impl will clear the mapping.
+func (o *intf2impls) Intf2Impl(intf, impl reflect.Type) (err error) { + if impl != nil && !impl.Implements(intf) { + return fmt.Errorf("Intf2Impl: %v does not implement %v", impl, intf) + } + rtid := rt2id(intf) + o2 := *o + for i := range o2 { + v := &o2[i] + if v.rtid == rtid { + v.impl = impl + return + } + } + *o = append(o2, intf2impl{rtid, impl}) + return +} + +func (o intf2impls) intf2impl(rtid uintptr) (rv reflect.Value) { + for i := range o { + v := &o[i] + if v.rtid == rtid { + if v.impl == nil { + return + } + if v.impl.Kind() == reflect.Ptr { + return reflect.New(v.impl.Elem()) + } + return reflect.New(v.impl).Elem() + } + } + return +} + +type structFieldInfoFlag uint8 + +const ( + _ structFieldInfoFlag = 1 << iota + structFieldInfoFlagReady + structFieldInfoFlagOmitEmpty +) + +func (x *structFieldInfoFlag) flagSet(f structFieldInfoFlag) { + *x = *x | f +} + +func (x *structFieldInfoFlag) flagClr(f structFieldInfoFlag) { + *x = *x &^ f +} + +func (x structFieldInfoFlag) flagGet(f structFieldInfoFlag) bool { + return x&f != 0 +} + +func (x structFieldInfoFlag) omitEmpty() bool { + return x.flagGet(structFieldInfoFlagOmitEmpty) +} + +func (x structFieldInfoFlag) ready() bool { + return x.flagGet(structFieldInfoFlagReady) +} + +type structFieldInfo struct { + encName string // encode name + fieldName string // field name + + is [maxLevelsEmbedding]uint16 // (recursive/embedded) field index in struct + nis uint8 // num levels of embedding. if 1, then it's not embedded. + + encNameAsciiAlphaNum bool // the encName only contains ascii alphabet and numbers + structFieldInfoFlag + _ [1]byte // padding +} + +func (si *structFieldInfo) setToZeroValue(v reflect.Value) { + if v, valid := si.field(v, false); valid { + v.Set(reflect.Zero(v.Type())) + } +} + +// rv returns the field of the struct. 
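This gives the io.Reader-to-bytes.Buffer behaviour the comment above describes. A sketch, assuming (as upstream does) that intf2impls is embedded in BasicHandle so Intf2Impl is promoted to every handle:

package example

import (
	"bytes"
	"io"
	"reflect"

	codec "github.com/hashicorp/go-msgpack/v2/codec"
)

var h codec.JsonHandle

// After this, decoding into a field declared as io.Reader yields a *bytes.Buffer.
var _ = h.Intf2Impl(
	reflect.TypeOf((*io.Reader)(nil)).Elem(), // the interface type
	reflect.TypeOf(&bytes.Buffer{}),          // the concrete type to infer
)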
+// If anonymous, it returns an Invalid +func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value, valid bool) { + // replicate FieldByIndex + for i, x := range si.is { + if uint8(i) == si.nis { + break + } + if v, valid = baseStructRv(v, update); !valid { + return + } + v = v.Field(int(x)) + } + + return v, true +} + +// func (si *structFieldInfo) fieldval(v reflect.Value, update bool) reflect.Value { +// v, _ = si.field(v, update) +// return v +// } + +func parseStructInfo(stag string) (toArray, omitEmpty bool, keytype valueType) { + keytype = valueTypeString // default + if stag == "" { + return + } + for i, s := range strings.Split(stag, ",") { + if i == 0 { + } else { + switch s { + case "omitempty": + omitEmpty = true + case "toarray": + toArray = true + case "int": + keytype = valueTypeInt + case "uint": + keytype = valueTypeUint + case "float": + keytype = valueTypeFloat + // case "bool": + // keytype = valueTypeBool + case "string": + keytype = valueTypeString + } + } + } + return +} + +func (si *structFieldInfo) parseTag(stag string) { + // if fname == "" { + // panic(errNoFieldNameToStructFieldInfo) + // } + + if stag == "" { + return + } + for i, s := range strings.Split(stag, ",") { + if i == 0 { + if s != "" { + si.encName = s + } + } else { + switch s { + case "omitempty": + si.flagSet(structFieldInfoFlagOmitEmpty) + // si.omitEmpty = true + // case "toarray": + // si.toArray = true + } + } + } +} + +type sfiSortedByEncName []*structFieldInfo + +func (p sfiSortedByEncName) Len() int { return len(p) } +func (p sfiSortedByEncName) Less(i, j int) bool { return p[uint(i)].encName < p[uint(j)].encName } +func (p sfiSortedByEncName) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +const structFieldNodeNumToCache = 4 + +type structFieldNodeCache struct { + rv [structFieldNodeNumToCache]reflect.Value + idx [structFieldNodeNumToCache]uint32 + num uint8 +} + +func (x *structFieldNodeCache) get(key uint32) (fv reflect.Value, valid bool) { + for i, k := range &x.idx { + if uint8(i) == x.num { + return // break + } + if key == k { + return x.rv[i], true + } + } + return +} + +func (x *structFieldNodeCache) tryAdd(fv reflect.Value, key uint32) { + if x.num < structFieldNodeNumToCache { + x.rv[x.num] = fv + x.idx[x.num] = key + x.num++ + return + } +} + +type structFieldNode struct { + v reflect.Value + cache2 structFieldNodeCache + cache3 structFieldNodeCache + update bool +} + +func (x *structFieldNode) field(si *structFieldInfo) (fv reflect.Value) { + // return si.fieldval(x.v, x.update) + // Note: we only cache if nis=2 or nis=3 i.e. up to 2 levels of embedding + // This mostly saves us time on the repeated calls to v.Elem, v.Field, etc. 
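The two parsers above accept the following tag grammar: a type-level options entry (toarray, omitempty, and the map-key type int/uint/float/string), plus per-field name,omitempty entries. A sketch; the convention of reading the type-level options from a field named _struct is carried over from upstream and is an assumption here:

type Point struct {
	// type-level options: encode this struct as an array, omit empty fields
	_struct bool `codec:",omitempty,toarray"`
	X       int  `codec:"x"`           // rename to "x" in the stream
	Y       int  `codec:"y,omitempty"` // rename, and skip when empty
}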
+ var valid bool + switch si.nis { + case 1: + fv = x.v.Field(int(si.is[0])) + case 2: + if fv, valid = x.cache2.get(uint32(si.is[0])); valid { + fv = fv.Field(int(si.is[1])) + return + } + fv = x.v.Field(int(si.is[0])) + if fv, valid = baseStructRv(fv, x.update); !valid { + return + } + x.cache2.tryAdd(fv, uint32(si.is[0])) + fv = fv.Field(int(si.is[1])) + case 3: + var key uint32 = uint32(si.is[0])<<16 | uint32(si.is[1]) + if fv, valid = x.cache3.get(key); valid { + fv = fv.Field(int(si.is[2])) + return + } + fv = x.v.Field(int(si.is[0])) + if fv, valid = baseStructRv(fv, x.update); !valid { + return + } + fv = fv.Field(int(si.is[1])) + if fv, valid = baseStructRv(fv, x.update); !valid { + return + } + x.cache3.tryAdd(fv, key) + fv = fv.Field(int(si.is[2])) + default: + fv, _ = si.field(x.v, x.update) + } + return +} + +func baseStructRv(v reflect.Value, update bool) (v2 reflect.Value, valid bool) { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + if !update { + return + } + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + return v, true +} + +type typeInfoFlag uint8 + +const ( + typeInfoFlagComparable = 1 << iota + typeInfoFlagIsZeroer + typeInfoFlagIsZeroerPtr +) + +// typeInfo keeps information about each (non-ptr) type referenced in the encode/decode sequence. +// +// During an encode/decode sequence, we work as below: +// - If base is a built in type, en/decode base value +// - If base is registered as an extension, en/decode base value +// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method +// - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method +// - Else decode appropriately based on the reflect.Kind +type typeInfo struct { + rt reflect.Type + elem reflect.Type + pkgpath string + + rtid uintptr + // rv0 reflect.Value // saved zero value, used if immutableKind + + numMeth uint16 // number of methods + kind uint8 + chandir uint8 + + anyOmitEmpty bool // true if a struct, and any of the fields are tagged "omitempty" + toArray bool // whether this (struct) type should be encoded as an array + keyType valueType // if struct, how is the field name stored in a stream? default is string + mbs bool // base type (T or *T) is a MapBySlice + + // ---- cpu cache line boundary? + sfiSort []*structFieldInfo // sorted. Used when enc/dec struct to map. + sfiSrc []*structFieldInfo // unsorted. Used when enc/dec struct to array. + + key reflect.Type + + // ---- cpu cache line boundary? + // sfis []structFieldInfo // all sfi, in src order, as created. + sfiNamesSort []byte // all names, with indexes into the sfiSort + + // format of marshal type fields below: [btj][mu]p? OR csp? + + bm bool // T is a binaryMarshaler + bmp bool // *T is a binaryMarshaler + bu bool // T is a binaryUnmarshaler + bup bool // *T is a binaryUnmarshaler + tm bool // T is a textMarshaler + tmp bool // *T is a textMarshaler + tu bool // T is a textUnmarshaler + tup bool // *T is a textUnmarshaler + + jm bool // T is a jsonMarshaler + jmp bool // *T is a jsonMarshaler + ju bool // T is a jsonUnmarshaler + jup bool // *T is a jsonUnmarshaler + cs bool // T is a Selfer + csp bool // *T is a Selfer + mf bool // T is a MissingFielder + mfp bool // *T is a MissingFielder + + // other flags, with individual bits representing if set. 
+ flags typeInfoFlag + infoFieldOmitempty bool + + _ [6]byte // padding + _ [2]uint64 // padding +} + +func (ti *typeInfo) isFlag(f typeInfoFlag) bool { + return ti.flags&f != 0 +} + +func (ti *typeInfo) indexForEncName(name []byte) (index int16) { + var sn []byte + if len(name)+2 <= 32 { + var buf [32]byte // should not escape to heap + sn = buf[:len(name)+2] + } else { + sn = make([]byte, len(name)+2) + } + copy(sn[1:], name) + sn[0], sn[len(sn)-1] = tiSep2(name), 0xff + j := bytes.Index(ti.sfiNamesSort, sn) + if j < 0 { + return -1 + } + index = int16(uint16(ti.sfiNamesSort[j+len(sn)+1]) | uint16(ti.sfiNamesSort[j+len(sn)])<<8) + return +} + +type rtid2ti struct { + rtid uintptr + ti *typeInfo +} + +// TypeInfos caches typeInfo for each type on first inspection. +// +// It is configured with a set of tag keys, which are used to get +// configuration for the type. +type TypeInfos struct { + // infos: formerly map[uintptr]*typeInfo, now *[]rtid2ti, 2 words expected + infos atomicTypeInfoSlice + mu sync.Mutex + tags []string + _ [2]uint64 // padding +} + +// NewTypeInfos creates a TypeInfos given a set of struct tags keys. +// +// This allows users customize the struct tag keys which contain configuration +// of their types. +func NewTypeInfos(tags []string) *TypeInfos { + return &TypeInfos{tags: tags} +} + +func (x *TypeInfos) structTag(t reflect.StructTag) (s string) { + // check for tags: codec, json, in that order. + // this allows seamless support for many configured structs. + for _, x := range x.tags { + s = t.Get(x) + if s != "" { + return s + } + } + return +} + +func findTypeInfo(s []rtid2ti, rtid uintptr) (i uint, ti *typeInfo) { + // binary search. adapted from sort/search.go. + // Note: we use goto (instead of for loop) so this can be inlined. + + // if sp == nil { + // return -1, nil + // } + // s := *sp + + // h, i, j := 0, 0, len(s) + var h uint // var h, i uint + var j = uint(len(s)) +LOOP: + if i < j { + h = i + (j-i)/2 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(s)) && s[i].rtid == rtid { + ti = s[i].ti + } + return +} + +func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) { + sp := x.infos.load() + if sp != nil { + _, pti = findTypeInfo(sp, rtid) + if pti != nil { + return + } + } + + rk := rt.Kind() + + if rk == reflect.Ptr { // || (rk == reflect.Interface && rtid != intfTypId) { + panicv.errorf("invalid kind passed to TypeInfos.get: %v - %v", rk, rt) + } + + // do not hold lock while computing this. + // it may lead to duplication, but that's ok. 
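TypeInfos is also the hook for custom tag keys: structTag above scans the configured keys in order and takes the first non-empty tag. A sketch, assuming BasicHandle exposes the upstream TypeInfos field:

package example

import codec "github.com/hashicorp/go-msgpack/v2/codec"

var mh codec.MsgpackHandle

func init() {
	// consult "mytag" first, then fall back to the usual keys
	mh.TypeInfos = codec.NewTypeInfos([]string{"mytag", "codec", "json"})
}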
+ ti := typeInfo{ + rt: rt, + rtid: rtid, + kind: uint8(rk), + pkgpath: rt.PkgPath(), + keyType: valueTypeString, // default it - so it's never 0 + } + // ti.rv0 = reflect.Zero(rt) + + // ti.comparable = rt.Comparable() + ti.numMeth = uint16(rt.NumMethod()) + + ti.bm, ti.bmp = implIntf(rt, binaryMarshalerTyp) + ti.bu, ti.bup = implIntf(rt, binaryUnmarshalerTyp) + ti.tm, ti.tmp = implIntf(rt, textMarshalerTyp) + ti.tu, ti.tup = implIntf(rt, textUnmarshalerTyp) + ti.jm, ti.jmp = implIntf(rt, jsonMarshalerTyp) + ti.ju, ti.jup = implIntf(rt, jsonUnmarshalerTyp) + ti.cs, ti.csp = implIntf(rt, selferTyp) + ti.mf, ti.mfp = implIntf(rt, missingFielderTyp) + + b1, b2 := implIntf(rt, iszeroTyp) + if b1 { + ti.flags |= typeInfoFlagIsZeroer + } + if b2 { + ti.flags |= typeInfoFlagIsZeroerPtr + } + if rt.Comparable() { + ti.flags |= typeInfoFlagComparable + } + + switch rk { + case reflect.Struct: + var omitEmpty bool + if f, ok := rt.FieldByName(structInfoFieldName); ok { + ti.toArray, omitEmpty, ti.keyType = parseStructInfo(x.structTag(f.Tag)) + ti.infoFieldOmitempty = omitEmpty + } else { + ti.keyType = valueTypeString + } + pp, pi := &pool.tiload, pool.tiload.Get() // pool.tiLoad() + pv := pi.(*typeInfoLoadArray) + pv.etypes[0] = ti.rtid + // vv := typeInfoLoad{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]} + vv := typeInfoLoad{pv.etypes[:1], pv.sfis[:0]} + x.rget(rt, rtid, omitEmpty, nil, &vv) + // ti.sfis = vv.sfis + ti.sfiSrc, ti.sfiSort, ti.sfiNamesSort, ti.anyOmitEmpty = rgetResolveSFI(rt, vv.sfis, pv) + pp.Put(pi) + case reflect.Map: + ti.elem = rt.Elem() + ti.key = rt.Key() + case reflect.Slice: + ti.mbs, _ = implIntf(rt, mapBySliceTyp) + ti.elem = rt.Elem() + case reflect.Chan: + ti.elem = rt.Elem() + ti.chandir = uint8(rt.ChanDir()) + case reflect.Array, reflect.Ptr: + ti.elem = rt.Elem() + } + // sfi = sfiSrc + + x.mu.Lock() + sp = x.infos.load() + var sp2 []rtid2ti + if sp == nil { + pti = &ti + sp2 = []rtid2ti{{rtid, pti}} + x.infos.store(sp2) + } else { + var idx uint + idx, pti = findTypeInfo(sp, rtid) + if pti == nil { + pti = &ti + sp2 = make([]rtid2ti, len(sp)+1) + copy(sp2, sp[:idx]) + copy(sp2[idx+1:], sp[idx:]) + sp2[idx] = rtid2ti{rtid, pti} + x.infos.store(sp2) + } + } + x.mu.Unlock() + return +} + +func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool, + indexstack []uint16, pv *typeInfoLoad) { + // Read up fields and store how to access the value. + // + // It uses go's rules for message selectors, + // which say that the field with the shallowest depth is selected. + // + // Note: we consciously use slices, not a map, to simulate a set. 
+ // Typically, types have < 16 fields, + // and iteration using equals is faster than maps there + flen := rt.NumField() + if flen > (1< %v fields are not supported - has %v fields", + (1<= 0; i-- { // bounds-check elimination + b := si.encName[i] + if (b >= '0' && b <= '9') || (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') { + continue + } + si.encNameAsciiAlphaNum = false + break + } + si.fieldName = f.Name + si.flagSet(structFieldInfoFlagReady) + + // pv.encNames = append(pv.encNames, si.encName) + + // si.ikind = int(f.Type.Kind()) + if len(indexstack) > maxLevelsEmbedding-1 { + panicv.errorf("codec: only supports up to %v depth of embedding - type has %v depth", + maxLevelsEmbedding-1, len(indexstack)) + } + si.nis = uint8(len(indexstack)) + 1 + copy(si.is[:], indexstack) + si.is[len(indexstack)] = j + + if omitEmpty { + si.flagSet(structFieldInfoFlagOmitEmpty) + } + pv.sfis = append(pv.sfis, si) + } +} + +func tiSep(name string) uint8 { + // (xn[0]%64) // (between 192-255 - outside ascii BMP) + // return 0xfe - (name[0] & 63) + // return 0xfe - (name[0] & 63) - uint8(len(name)) + // return 0xfe - (name[0] & 63) - uint8(len(name)&63) + // return ((0xfe - (name[0] & 63)) & 0xf8) | (uint8(len(name) & 0x07)) + return 0xfe - (name[0] & 63) - uint8(len(name)&63) +} + +func tiSep2(name []byte) uint8 { + return 0xfe - (name[0] & 63) - uint8(len(name)&63) +} + +// resolves the struct field info got from a call to rget. +// Returns a trimmed, unsorted and sorted []*structFieldInfo. +func rgetResolveSFI(rt reflect.Type, x []structFieldInfo, pv *typeInfoLoadArray) ( + y, z []*structFieldInfo, ss []byte, anyOmitEmpty bool) { + sa := pv.sfiidx[:0] + sn := pv.b[:] + n := len(x) + + var xn string + var ui uint16 + var sep byte + + for i := range x { + ui = uint16(i) + xn = x[i].encName // fieldName or encName? use encName for now. + if len(xn)+2 > cap(pv.b) { + sn = make([]byte, len(xn)+2) + } else { + sn = sn[:len(xn)+2] + } + // use a custom sep, so that misses are less frequent, + // since the sep (first char in search) is as unique as first char in field name. + sep = tiSep(xn) + sn[0], sn[len(sn)-1] = sep, 0xff + copy(sn[1:], xn) + j := bytes.Index(sa, sn) + if j == -1 { + sa = append(sa, sep) + sa = append(sa, xn...) 
+ sa = append(sa, 0xff, byte(ui>>8), byte(ui)) + } else { + index := uint16(sa[j+len(sn)+1]) | uint16(sa[j+len(sn)])<<8 + // one of them must be reset to nil, + // and the index updated appropriately to the other one + if x[i].nis == x[index].nis { + } else if x[i].nis < x[index].nis { + sa[j+len(sn)], sa[j+len(sn)+1] = byte(ui>>8), byte(ui) + if x[index].ready() { + x[index].flagClr(structFieldInfoFlagReady) + n-- + } + } else { + if x[i].ready() { + x[i].flagClr(structFieldInfoFlagReady) + n-- + } + } + } + + } + var w []structFieldInfo + sharingArray := len(x) <= typeInfoLoadArraySfisLen // sharing array with typeInfoLoadArray + if sharingArray { + w = make([]structFieldInfo, n) + } + + // remove all the nils (non-ready) + y = make([]*structFieldInfo, n) + n = 0 + var sslen int + for i := range x { + if !x[i].ready() { + continue + } + if !anyOmitEmpty && x[i].omitEmpty() { + anyOmitEmpty = true + } + if sharingArray { + w[n] = x[i] + y[n] = &w[n] + } else { + y[n] = &x[i] + } + sslen = sslen + len(x[i].encName) + 4 + n++ + } + if n != len(y) { + panicv.errorf("failure reading struct %v - expecting %d of %d valid fields, got %d", + rt, len(y), len(x), n) + } + + z = make([]*structFieldInfo, len(y)) + copy(z, y) + sort.Sort(sfiSortedByEncName(z)) + + sharingArray = len(sa) <= typeInfoLoadArraySfiidxLen + if sharingArray { + ss = make([]byte, 0, sslen) + } else { + ss = sa[:0] // reuse the newly made sa array if necessary + } + for i := range z { + xn = z[i].encName + sep = tiSep(xn) + ui = uint16(i) + ss = append(ss, sep) + ss = append(ss, xn...) + ss = append(ss, 0xff, byte(ui>>8), byte(ui)) + } + return +} + +func implIntf(rt, iTyp reflect.Type) (base bool, indir bool) { + return rt.Implements(iTyp), reflect.PtrTo(rt).Implements(iTyp) +} + +// isEmptyStruct is only called from isEmptyValue, and checks if a struct is empty: +// - does it implement IsZero() bool +// - is it comparable, and can i compare directly using == +// - if checkStruct, then walk through the encodable fields +// and check if they are empty or not. +func isEmptyStruct(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool { + // v is a struct kind - no need to check again. + // We only check isZero on a struct kind, to reduce the amount of times + // that we lookup the rtid and typeInfo for each type as we walk the tree. + + vt := v.Type() + rtid := rt2id(vt) + if tinfos == nil { + tinfos = defTypeInfos + } + ti := tinfos.get(rtid, vt) + if ti.rtid == timeTypId { + return rv2i(v).(time.Time).IsZero() + } + if ti.isFlag(typeInfoFlagIsZeroerPtr) && v.CanAddr() { + return rv2i(v.Addr()).(isZeroer).IsZero() + } + if ti.isFlag(typeInfoFlagIsZeroer) { + return rv2i(v).(isZeroer).IsZero() + } + if ti.isFlag(typeInfoFlagComparable) { + return rv2i(v) == rv2i(reflect.Zero(vt)) + } + if !checkStruct { + return false + } + // We only care about what we can encode/decode, + // so that is what we use to check omitEmpty. + for _, si := range ti.sfiSrc { + sfv, valid := si.field(v, false) + if valid && !isEmptyValue(sfv, tinfos, deref, checkStruct) { + return false + } + } + return true +} + +// func roundFloat(x float64) float64 { +// t := math.Trunc(x) +// if math.Abs(x-t) >= 0.5 { +// return t + math.Copysign(1, x) +// } +// return t +// } + +func panicToErr(h errDecorator, err *error) { + // Note: This method MUST be called directly from defer i.e. defer panicToErr ... 
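isEmptyStruct above gives types a say in omitempty: time.Time uses IsZero, any type implementing IsZero() bool is consulted next, comparable types fall back to ==, and only then are fields walked. A sketch of the IsZero hook:

type window struct{ lo, hi int }

// Non-zero fields can still count as "empty" for omitempty purposes.
func (w window) IsZero() bool { return w.lo >= w.hi }

type doc struct {
	W window `codec:"w,omitempty"` // skipped whenever W.IsZero() reports true
}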
+ // else it seems the recover is not fully handled + if recoverPanicToErr { + if x := recover(); x != nil { + // fmt.Printf("panic'ing with: %v\n", x) + // debug.PrintStack() + panicValToErr(h, x, err) + } + } +} + +func panicValToErr(h errDecorator, v interface{}, err *error) { + switch xerr := v.(type) { + case nil: + case error: + switch xerr { + case nil: + case io.EOF, io.ErrUnexpectedEOF, errEncoderNotInitialized, errDecoderNotInitialized: + // treat as special (bubble up) + *err = xerr + default: + h.wrapErr(xerr, err) + } + case string: + if xerr != "" { + h.wrapErr(xerr, err) + } + case fmt.Stringer: + if xerr != nil { + h.wrapErr(xerr, err) + } + default: + h.wrapErr(v, err) + } +} + +func isImmutableKind(k reflect.Kind) (v bool) { + // return immutableKindsSet[k] + // since we know reflect.Kind is in range 0..31, then use the k%32 == k constraint + return immutableKindsSet[k%reflect.Kind(len(immutableKindsSet))] // bounds-check-elimination +} + +// ---- + +type codecFnInfo struct { + ti *typeInfo + xfFn Ext + xfTag uint64 + seq seqType + addrD bool + addrF bool // if addrD, this says whether decode function can take a value or a ptr + addrE bool +} + +// codecFn encapsulates the captured variables and the encode function. +// This way, we only do some calculations one times, and pass to the +// code block that should be called (encapsulated in a function) +// instead of executing the checks every time. +type codecFn struct { + i codecFnInfo + fe func(*Encoder, *codecFnInfo, reflect.Value) + fd func(*Decoder, *codecFnInfo, reflect.Value) + _ [1]uint64 // padding +} + +type codecRtidFn struct { + rtid uintptr + fn *codecFn +} + +// ---- + +// these "checkOverflow" functions must be inlinable, and not call anybody. +// Overflow means that the value cannot be represented without wrapping/overflow. +// Overflow=false does not mean that the value can be represented without losing precision +// (especially for floating point). + +type checkOverflow struct{} + +// func (checkOverflow) Float16(f float64) (overflow bool) { +// panicv.errorf("unimplemented") +// if f < 0 { +// f = -f +// } +// return math.MaxFloat32 < f && f <= math.MaxFloat64 +// } + +func (checkOverflow) Float32(v float64) (overflow bool) { + if v < 0 { + v = -v + } + return math.MaxFloat32 < v && v <= math.MaxFloat64 +} +func (checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) { + if bitsize == 0 || bitsize >= 64 || v == 0 { + return + } + if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { + overflow = true + } + return +} +func (checkOverflow) Int(v int64, bitsize uint8) (overflow bool) { + if bitsize == 0 || bitsize >= 64 || v == 0 { + return + } + if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { + overflow = true + } + return +} +func (checkOverflow) SignedInt(v uint64) (overflow bool) { + //e.g. 
-127 to 128 for int8 + pos := (v >> 63) == 0 + ui2 := v & 0x7fffffffffffffff + if pos { + if ui2 > math.MaxInt64 { + overflow = true + } + } else { + if ui2 > math.MaxInt64-1 { + overflow = true + } + } + return +} + +func (x checkOverflow) Float32V(v float64) float64 { + if x.Float32(v) { + panicv.errorf("float32 overflow: %v", v) + } + return v +} +func (x checkOverflow) UintV(v uint64, bitsize uint8) uint64 { + if x.Uint(v, bitsize) { + panicv.errorf("uint64 overflow: %v", v) + } + return v +} +func (x checkOverflow) IntV(v int64, bitsize uint8) int64 { + if x.Int(v, bitsize) { + panicv.errorf("int64 overflow: %v", v) + } + return v +} +func (x checkOverflow) SignedIntV(v uint64) int64 { + if x.SignedInt(v) { + panicv.errorf("uint64 to int64 overflow: %v", v) + } + return int64(v) +} + +// ------------------ SORT ----------------- + +func isNaN(f float64) bool { return f != f } + +// ----------------------- + +type ioFlusher interface { + Flush() error +} + +type ioPeeker interface { + Peek(int) ([]byte, error) +} + +type ioBuffered interface { + Buffered() int +} + +// ----------------------- + +type intSlice []int64 +type uintSlice []uint64 + +// type uintptrSlice []uintptr +type floatSlice []float64 +type boolSlice []bool +type stringSlice []string + +// type bytesSlice [][]byte + +func (p intSlice) Len() int { return len(p) } +func (p intSlice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] } +func (p intSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p uintSlice) Len() int { return len(p) } +func (p uintSlice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] } +func (p uintSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +// func (p uintptrSlice) Len() int { return len(p) } +// func (p uintptrSlice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] } +// func (p uintptrSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p floatSlice) Len() int { return len(p) } +func (p floatSlice) Less(i, j int) bool { + return p[uint(i)] < p[uint(j)] || isNaN(p[uint(i)]) && !isNaN(p[uint(j)]) +} +func (p floatSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p stringSlice) Len() int { return len(p) } +func (p stringSlice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] } +func (p stringSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +// func (p bytesSlice) Len() int { return len(p) } +// func (p bytesSlice) Less(i, j int) bool { return bytes.Compare(p[uint(i)], p[uint(j)]) == -1 } +// func (p bytesSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p boolSlice) Len() int { return len(p) } +func (p boolSlice) Less(i, j int) bool { return !p[uint(i)] && p[uint(j)] } +func (p boolSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +// --------------------- + +type sfiRv struct { + v *structFieldInfo + r reflect.Value +} + +type intRv struct { + v int64 + r reflect.Value +} +type intRvSlice []intRv +type uintRv struct { + v uint64 + r reflect.Value +} +type uintRvSlice []uintRv +type floatRv struct { + v float64 + r reflect.Value +} +type floatRvSlice []floatRv +type boolRv struct { + v bool + r reflect.Value +} +type boolRvSlice []boolRv +type stringRv struct { + v string + r reflect.Value +} +type stringRvSlice []stringRv +type bytesRv struct { + v []byte + r reflect.Value +} +type bytesRvSlice []bytesRv +type timeRv struct { + v time.Time + r reflect.Value +} +type 
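A worked example of the truncate-and-compare trick used by Uint and Int above (package-internal; note int8 spans -128 to 127 and uint8 spans 0 to 255):

// inside package codec
func overflowExample() {
	var chk checkOverflow
	_ = chk.Uint(200, 8) // false: logical (200<<56)>>56 == 200, so 200 fits a uint8
	_ = chk.Int(200, 8)  // true: arithmetic (200<<56)>>56 == -56 != 200, so 200 overflows an int8
}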
timeRvSlice []timeRv + +func (p intRvSlice) Len() int { return len(p) } +func (p intRvSlice) Less(i, j int) bool { return p[uint(i)].v < p[uint(j)].v } +func (p intRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p uintRvSlice) Len() int { return len(p) } +func (p uintRvSlice) Less(i, j int) bool { return p[uint(i)].v < p[uint(j)].v } +func (p uintRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p floatRvSlice) Len() int { return len(p) } +func (p floatRvSlice) Less(i, j int) bool { + return p[uint(i)].v < p[uint(j)].v || isNaN(p[uint(i)].v) && !isNaN(p[uint(j)].v) +} +func (p floatRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p stringRvSlice) Len() int { return len(p) } +func (p stringRvSlice) Less(i, j int) bool { return p[uint(i)].v < p[uint(j)].v } +func (p stringRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p bytesRvSlice) Len() int { return len(p) } +func (p bytesRvSlice) Less(i, j int) bool { return bytes.Compare(p[uint(i)].v, p[uint(j)].v) == -1 } +func (p bytesRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p boolRvSlice) Len() int { return len(p) } +func (p boolRvSlice) Less(i, j int) bool { return !p[uint(i)].v && p[uint(j)].v } +func (p boolRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p timeRvSlice) Len() int { return len(p) } +func (p timeRvSlice) Less(i, j int) bool { return p[uint(i)].v.Before(p[uint(j)].v) } +func (p timeRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +// ----------------- + +type bytesI struct { + v []byte + i interface{} +} + +type bytesISlice []bytesI + +func (p bytesISlice) Len() int { return len(p) } +func (p bytesISlice) Less(i, j int) bool { return bytes.Compare(p[uint(i)].v, p[uint(j)].v) == -1 } +func (p bytesISlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +// ----------------- + +type set []uintptr + +func (s *set) add(v uintptr) (exists bool) { + // e.ci is always nil, or len >= 1 + x := *s + if x == nil { + x = make([]uintptr, 1, 8) + x[0] = v + *s = x + return + } + // typically, length will be 1. make this perform. + if len(x) == 1 { + if j := x[0]; j == 0 { + x[0] = v + } else if j == v { + exists = true + } else { + x = append(x, v) + *s = x + } + return + } + // check if it exists + for _, j := range x { + if j == v { + exists = true + return + } + } + // try to replace a "deleted" slot + for i, j := range x { + if j == 0 { + x[i] = v + return + } + } + // if unable to replace deleted slot, just append it. + x = append(x, v) + *s = x + return +} + +func (s *set) remove(v uintptr) (exists bool) { + x := *s + if len(x) == 0 { + return + } + if len(x) == 1 { + if x[0] == v { + x[0] = 0 + } + return + } + for i, j := range x { + if j == v { + exists = true + x[i] = 0 // set it to 0, as way to delete it. + // copy(x[i:], x[i+1:]) + // x = x[:len(x)-1] + return + } + } + return +} + +// ------ + +// bitset types are better than [256]bool, because they permit the whole +// bitset array being on a single cache line and use less memory. +// +// Also, since pos is a byte (0-255), there's no bounds checks on indexing (cheap). +// +// We previously had bitset128 [16]byte, and bitset32 [4]byte, but those introduces +// bounds checking, so we discarded them, and everyone uses bitset256. 
+// +// given x > 0 and n > 0 and x is exactly 2^n, then pos/x === pos>>n AND pos%x === pos&(x-1). +// consequently, pos/32 === pos>>5, pos/16 === pos>>4, pos/8 === pos>>3, pos%8 == pos&7 + +type bitset256 [32]byte + +func (x *bitset256) isset(pos byte) bool { + return x[pos>>3]&(1<<(pos&7)) != 0 +} + +// func (x *bitset256) issetv(pos byte) byte { +// return x[pos>>3] & (1 << (pos & 7)) +// } + +func (x *bitset256) set(pos byte) { + x[pos>>3] |= (1 << (pos & 7)) +} + +// func (x *bitset256) unset(pos byte) { +// x[pos>>3] &^= (1 << (pos & 7)) +// } + +// type bit2set256 [64]byte + +// func (x *bit2set256) set(pos byte, v1, v2 bool) { +// var pos2 uint8 = (pos & 3) << 1 // returning 0, 2, 4 or 6 +// if v1 { +// x[pos>>2] |= 1 << (pos2 + 1) +// } +// if v2 { +// x[pos>>2] |= 1 << pos2 +// } +// } +// func (x *bit2set256) get(pos byte) uint8 { +// var pos2 uint8 = (pos & 3) << 1 // returning 0, 2, 4 or 6 +// return x[pos>>2] << (6 - pos2) >> 6 // 11000000 -> 00000011 +// } + +// ------------ + +type pooler struct { + // function-scoped pooled resources + tiload sync.Pool // for type info loading + sfiRv8, sfiRv16, sfiRv32, sfiRv64, sfiRv128 sync.Pool // for struct encoding + + // lifetime-scoped pooled resources + // dn sync.Pool // for decNaked + buf1k, buf2k, buf4k, buf8k, buf16k, buf32k, buf64k sync.Pool // for [N]byte +} + +func (p *pooler) init() { + p.tiload.New = func() interface{} { return new(typeInfoLoadArray) } + + p.sfiRv8.New = func() interface{} { return new([8]sfiRv) } + p.sfiRv16.New = func() interface{} { return new([16]sfiRv) } + p.sfiRv32.New = func() interface{} { return new([32]sfiRv) } + p.sfiRv64.New = func() interface{} { return new([64]sfiRv) } + p.sfiRv128.New = func() interface{} { return new([128]sfiRv) } + + // p.dn.New = func() interface{} { x := new(decNaked); x.init(); return x } + + p.buf1k.New = func() interface{} { return new([1 * 1024]byte) } + p.buf2k.New = func() interface{} { return new([2 * 1024]byte) } + p.buf4k.New = func() interface{} { return new([4 * 1024]byte) } + p.buf8k.New = func() interface{} { return new([8 * 1024]byte) } + p.buf16k.New = func() interface{} { return new([16 * 1024]byte) } + p.buf32k.New = func() interface{} { return new([32 * 1024]byte) } + p.buf64k.New = func() interface{} { return new([64 * 1024]byte) } + +} + +// func (p *pooler) sfiRv8() (sp *sync.Pool, v interface{}) { +// return &p.strRv8, p.strRv8.Get() +// } +// func (p *pooler) sfiRv16() (sp *sync.Pool, v interface{}) { +// return &p.strRv16, p.strRv16.Get() +// } +// func (p *pooler) sfiRv32() (sp *sync.Pool, v interface{}) { +// return &p.strRv32, p.strRv32.Get() +// } +// func (p *pooler) sfiRv64() (sp *sync.Pool, v interface{}) { +// return &p.strRv64, p.strRv64.Get() +// } +// func (p *pooler) sfiRv128() (sp *sync.Pool, v interface{}) { +// return &p.strRv128, p.strRv128.Get() +// } + +// func (p *pooler) bytes1k() (sp *sync.Pool, v interface{}) { +// return &p.buf1k, p.buf1k.Get() +// } +// func (p *pooler) bytes2k() (sp *sync.Pool, v interface{}) { +// return &p.buf2k, p.buf2k.Get() +// } +// func (p *pooler) bytes4k() (sp *sync.Pool, v interface{}) { +// return &p.buf4k, p.buf4k.Get() +// } +// func (p *pooler) bytes8k() (sp *sync.Pool, v interface{}) { +// return &p.buf8k, p.buf8k.Get() +// } +// func (p *pooler) bytes16k() (sp *sync.Pool, v interface{}) { +// return &p.buf16k, p.buf16k.Get() +// } +// func (p *pooler) bytes32k() (sp *sync.Pool, v interface{}) { +// return &p.buf32k, p.buf32k.Get() +// } +// func (p *pooler) bytes64k() (sp *sync.Pool, v 
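A worked example of the bitset256 arithmetic above, using the identities pos/8 == pos>>3 and pos%8 == pos&7:

// inside package codec
func bitsetExample() {
	var bs bitset256
	bs.set(70)       // 70>>3 == 8 and 70&7 == 6: set bit 6 of x[8]
	_ = bs.isset(70) // true: x[8]&(1<<6) != 0
	_ = bs.isset(71) // false: bit 7 of x[8] is still clear
}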
interface{}) { +// return &p.buf64k, p.buf64k.Get() +// } + +// func (p *pooler) tiLoad() (sp *sync.Pool, v interface{}) { +// return &p.tiload, p.tiload.Get() +// } + +// func (p *pooler) decNaked() (sp *sync.Pool, v interface{}) { +// return &p.dn, p.dn.Get() +// } + +// func (p *pooler) decNaked() (v *decNaked, f func(*decNaked) ) { +// sp := &(p.dn) +// vv := sp.Get() +// return vv.(*decNaked), func(x *decNaked) { sp.Put(vv) } +// } +// func (p *pooler) decNakedGet() (v interface{}) { +// return p.dn.Get() +// } +// func (p *pooler) tiLoadGet() (v interface{}) { +// return p.tiload.Get() +// } +// func (p *pooler) decNakedPut(v interface{}) { +// p.dn.Put(v) +// } +// func (p *pooler) tiLoadPut(v interface{}) { +// p.tiload.Put(v) +// } + +// ---------------------------------------------------- + +type panicHdl struct{} + +func (panicHdl) errorv(err error) { + if err != nil { + panic(err) + } +} + +func (panicHdl) errorstr(message string) { + if message != "" { + panic(message) + } +} + +func (panicHdl) errorf(format string, params ...interface{}) { + if format == "" { + } else if len(params) == 0 { + panic(format) + } else { + panic(fmt.Sprintf(format, params...)) + } +} + +// ---------------------------------------------------- + +type errDecorator interface { + wrapErr(in interface{}, out *error) +} + +type errDecoratorDef struct{} + +func (errDecoratorDef) wrapErr(v interface{}, e *error) { *e = fmt.Errorf("%v", v) } + +// ---------------------------------------------------- + +type must struct{} + +func (must) String(s string, err error) string { + if err != nil { + panicv.errorv(err) + } + return s +} +func (must) Int(s int64, err error) int64 { + if err != nil { + panicv.errorv(err) + } + return s +} +func (must) Uint(s uint64, err error) uint64 { + if err != nil { + panicv.errorv(err) + } + return s +} +func (must) Float(s float64, err error) float64 { + if err != nil { + panicv.errorv(err) + } + return s +} + +// ------------------- + +type bytesBufPooler struct { + pool *sync.Pool + poolbuf interface{} +} + +func (z *bytesBufPooler) end() { + if z.pool != nil { + z.pool.Put(z.poolbuf) + z.pool, z.poolbuf = nil, nil + } +} + +func (z *bytesBufPooler) get(bufsize int) (buf []byte) { + // ensure an end is called first (if necessary) + if z.pool != nil { + z.pool.Put(z.poolbuf) + z.pool, z.poolbuf = nil, nil + } + + if bufsize <= 1*1024 { + z.pool, z.poolbuf = &pool.buf1k, pool.buf1k.Get() // pool.bytes1k() + buf = z.poolbuf.(*[1 * 1024]byte)[:] + } else if bufsize <= 2*1024 { + z.pool, z.poolbuf = &pool.buf2k, pool.buf2k.Get() // pool.bytes2k() + buf = z.poolbuf.(*[2 * 1024]byte)[:] + } else if bufsize <= 4*1024 { + z.pool, z.poolbuf = &pool.buf4k, pool.buf4k.Get() // pool.bytes4k() + buf = z.poolbuf.(*[4 * 1024]byte)[:] + } else if bufsize <= 8*1024 { + z.pool, z.poolbuf = &pool.buf8k, pool.buf8k.Get() // pool.bytes8k() + buf = z.poolbuf.(*[8 * 1024]byte)[:] + } else if bufsize <= 16*1024 { + z.pool, z.poolbuf = &pool.buf16k, pool.buf16k.Get() // pool.bytes16k() + buf = z.poolbuf.(*[16 * 1024]byte)[:] + } else if bufsize <= 32*1024 { + z.pool, z.poolbuf = &pool.buf32k, pool.buf32k.Get() // pool.bytes32k() + buf = z.poolbuf.(*[32 * 1024]byte)[:] + } else { + z.pool, z.poolbuf = &pool.buf64k, pool.buf64k.Get() // pool.bytes64k() + buf = z.poolbuf.(*[64 * 1024]byte)[:] + } + return +} + +// ---------------- + +type sfiRvPooler struct { + pool *sync.Pool + poolv interface{} +} + +func (z *sfiRvPooler) end() { + if z.pool != nil { + z.pool.Put(z.poolv) + z.pool, z.poolv = nil, nil + 
} +} + +func (z *sfiRvPooler) get(newlen int) (fkvs []sfiRv) { + if newlen < 0 { // bounds-check-elimination + // cannot happen // here for bounds-check-elimination + } else if newlen <= 8 { + z.pool, z.poolv = &pool.sfiRv8, pool.sfiRv8.Get() // pool.sfiRv8() + fkvs = z.poolv.(*[8]sfiRv)[:newlen] + } else if newlen <= 16 { + z.pool, z.poolv = &pool.sfiRv16, pool.sfiRv16.Get() // pool.sfiRv16() + fkvs = z.poolv.(*[16]sfiRv)[:newlen] + } else if newlen <= 32 { + z.pool, z.poolv = &pool.sfiRv32, pool.sfiRv32.Get() // pool.sfiRv32() + fkvs = z.poolv.(*[32]sfiRv)[:newlen] + } else if newlen <= 64 { + z.pool, z.poolv = &pool.sfiRv64, pool.sfiRv64.Get() // pool.sfiRv64() + fkvs = z.poolv.(*[64]sfiRv)[:newlen] + } else if newlen <= 128 { + z.pool, z.poolv = &pool.sfiRv128, pool.sfiRv128.Get() // pool.sfiRv128() + fkvs = z.poolv.(*[128]sfiRv)[:newlen] + } else { + fkvs = make([]sfiRv, newlen) + } + return +} + +// safe-mod optimizations + +const safeMode = true + +// stringView returns a view of the []byte as a string. +// In unsafe mode, it doesn't incur allocation and copying caused by conversion. +// In regular safe mode, it is an allocation and copy. +// +// Usage: Always maintain a reference to v while result of this call is in use, +// +// and call keepAlive4BytesView(v) at point where done with view. +func stringView(v []byte) string { + return string(v) +} + +// bytesView returns a view of the string as a []byte. +// In unsafe mode, it doesn't incur allocation and copying caused by conversion. +// In regular safe mode, it is an allocation and copy. +// +// Usage: Always maintain a reference to v while result of this call is in use, +// +// and call keepAlive4BytesView(v) at point where done with view. +func bytesView(v string) []byte { + return []byte(v) +} + +func definitelyNil(v interface{}) bool { + // this is a best-effort option. + // We just return false, so we don't unnecessarily incur the cost of reflection this early. 
+ return false +} + +func rv2i(rv reflect.Value) interface{} { + return rv.Interface() +} + +func rt2id(rt reflect.Type) uintptr { + return reflect.ValueOf(rt).Pointer() +} + +func i2rtid(i interface{}) uintptr { + return reflect.ValueOf(reflect.TypeOf(i)).Pointer() +} + +// -------------------------- + +func isEmptyValue(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool { + switch v.Kind() { + case reflect.Invalid: + return true + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + if deref { + if v.IsNil() { + return true + } + return isEmptyValue(v.Elem(), tinfos, deref, checkStruct) + } + return v.IsNil() + case reflect.Struct: + return isEmptyStruct(v, tinfos, deref, checkStruct) + } + return false +} + +// -------------------------- +type atomicClsErr struct { + v atomic.Value +} + +func (x *atomicClsErr) load() (e clsErr) { + if i := x.v.Load(); i != nil { + e = i.(clsErr) + } + return +} + +func (x *atomicClsErr) store(p clsErr) { + x.v.Store(p) +} + +// -------------------------- +type atomicTypeInfoSlice struct { // expected to be 2 words + v atomic.Value +} + +func (x *atomicTypeInfoSlice) load() (e []rtid2ti) { + if i := x.v.Load(); i != nil { + e = i.([]rtid2ti) + } + return +} + +func (x *atomicTypeInfoSlice) store(p []rtid2ti) { + x.v.Store(p) +} + +// -------------------------- +type atomicRtidFnSlice struct { // expected to be 2 words + v atomic.Value +} + +func (x *atomicRtidFnSlice) load() (e []codecRtidFn) { + if i := x.v.Load(); i != nil { + e = i.([]codecRtidFn) + } + return +} + +func (x *atomicRtidFnSlice) store(p []codecRtidFn) { + x.v.Store(p) +} + +// -------------------------- +func (n *decNaked) ru() reflect.Value { + return reflect.ValueOf(&n.u).Elem() +} +func (n *decNaked) ri() reflect.Value { + return reflect.ValueOf(&n.i).Elem() +} +func (n *decNaked) rf() reflect.Value { + return reflect.ValueOf(&n.f).Elem() +} +func (n *decNaked) rl() reflect.Value { + return reflect.ValueOf(&n.l).Elem() +} +func (n *decNaked) rs() reflect.Value { + return reflect.ValueOf(&n.s).Elem() +} +func (n *decNaked) rt() reflect.Value { + return reflect.ValueOf(&n.t).Elem() +} +func (n *decNaked) rb() reflect.Value { + return reflect.ValueOf(&n.b).Elem() +} + +// -------------------------- +func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) { + rv.SetBytes(d.rawBytes()) +} + +func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) { + rv.SetString(d.d.DecodeString()) +} + +func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) { + rv.SetBool(d.d.DecodeBool()) +} + +func (d *Decoder) kTime(f *codecFnInfo, rv reflect.Value) { + rv.Set(reflect.ValueOf(d.d.DecodeTime())) +} + +func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) { + fv := d.d.DecodeFloat64() + if chkOvf.Float32(fv) { + d.errorf("float32 overflow: %v", fv) + } + rv.SetFloat(fv) +} + +func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) { + rv.SetFloat(d.d.DecodeFloat64()) +} + +func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) +} + +func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) { + 
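atomicTypeInfoSlice and atomicRtidFnSlice above implement the same copy-on-write scheme used by TypeInfos.get and the codecFn cache: readers Load without locks, writers serialize on a mutex and Store a freshly copied slice. A generic sketch of that pattern:

package example

import (
	"sync"
	"sync/atomic"
)

type cowInts struct {
	mu sync.Mutex   // serializes writers only
	v  atomic.Value // always holds an immutable []int
}

func (c *cowInts) load() []int { s, _ := c.v.Load().([]int); return s }

func (c *cowInts) add(x int) {
	c.mu.Lock()
	old := c.load()
	s := make([]int, len(old)+1) // copy; never mutate a published slice
	copy(s, old)
	s[len(old)] = x
	c.v.Store(s)
	c.mu.Unlock()
}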
rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 8)) +} + +func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 16)) +} + +func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 32)) +} + +func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(d.d.DecodeInt64()) +} + +func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) +} + +func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) +} + +func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 8)) +} + +func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 16)) +} + +func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 32)) +} + +func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(d.d.DecodeUint64()) +} + +// ---------------- + +func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeBool(rv.Bool()) +} + +func (e *Encoder) kTime(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeTime(rv2i(rv).(time.Time)) +} + +func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) { + s := rv.String() + if e.h.StringToRaw { + e.e.EncodeStringBytesRaw(bytesView(s)) + } else { + e.e.EncodeStringEnc(cUTF8, s) + } +} + +func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeFloat64(rv.Float()) +} + +func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeFloat32(float32(rv.Float())) +} + +func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kInt8(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kInt16(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kInt32(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kInt64(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kUint8(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kUint16(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kUint32(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kUint64(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kUintptr(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func makeMapReflect(t reflect.Type, size int) reflect.Value { + if size < 0 { + return reflect.MakeMapWithSize(t, 4) + } + return reflect.MakeMapWithSize(t, size) +} diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/helper_internal.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/helper_internal.go new file mode 100644 index 0000000000000..4f5eb84362e2b --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/helper_internal.go @@ -0,0 +1,89 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// All non-std package dependencies live in this file, +// so porting to different environment is easy (just update functions). 
+ +func pruneSignExt(v []byte, pos bool) (n int) { + if len(v) < 2 { + } else if pos && v[0] == 0 { + for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ { + } + } else if !pos && v[0] == 0xff { + for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ { + } + } + return +} + +// growCap returns a new capacity for a slice, given the following: +// - oldCap: current capacity +// - unit: in-memory size of an element +// - num: number of elements to add +func growCap(oldCap, unit, num int) (newCap int) { + // appendslice logic (if cap < 1024, *2, else *1.25): + // leads to many copy calls, especially when copying bytes. + // bytes.Buffer model (2*cap + n): much better for bytes. + // smarter way is to take the byte-size of the appended element(type) into account + + // maintain 3 thresholds: + // t1: if cap <= t1, newcap = 2x + // t2: if cap <= t2, newcap = 1.75x + // t3: if cap <= t3, newcap = 1.5x + // else newcap = 1.25x + // + // t1, t2, t3 >= 1024 always. + // i.e. if unit size >= 16, then always do 2x or 1.25x (ie t1, t2, t3 are all same) + // + // With this, appending for bytes increases by: + // 100% up to 4K + // 75% up to 8K + // 50% up to 16K + // 25% beyond that + + // unit can be 0 e.g. for struct{}{}; handle that appropriately + var t1, t2, t3 int // thresholds + if unit <= 1 { + t1, t2, t3 = 4*1024, 8*1024, 16*1024 + } else if unit < 16 { + t3 = 16 / unit * 1024 + t1 = t3 * 1 / 4 + t2 = t3 * 2 / 4 + } else { + t1, t2, t3 = 1024, 1024, 1024 + } + + var x int // temporary variable + + // x is multiplier here: one of 5, 6, 7 or 8; incr of 25%, 50%, 75% or 100% respectively + if oldCap <= t1 { // [0,t1] + x = 8 + } else if oldCap > t3 { // (t3,infinity] + x = 5 + } else if oldCap <= t2 { // (t1,t2] + x = 7 + } else { // (t2,t3] + x = 6 + } + newCap = x * oldCap / 4 + + if num > 0 { + newCap += num + } + + // ensure newCap is a multiple of 64 (if it is > 64) or 16. + if newCap > 64 { + if x = newCap % 64; x != 0 { + x = newCap / 64 + newCap = 64 * (x + 1) + } + } else { + if x = newCap % 16; x != 0 { + x = newCap / 16 + newCap = 16 * (x + 1) + } + } + return +} diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/json.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/json.go new file mode 100644 index 0000000000000..5fc375f9286e2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/json.go @@ -0,0 +1,1491 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// By default, this json support uses base64 encoding for bytes, because you cannot +// store and read any arbitrary string in json (only unicode). +// However, the user can configure how to encode/decode bytes. +// +// This library specifically supports UTF-8 for encoding and decoding only. +// +// Note that the library will happily encode/decode things which are not valid +// json e.g. a map[int64]string. We do it for consistency. With valid json, +// we will encode and decode appropriately. +// Users can specify their map type if necessary to force it. +// +// Note: +// - we cannot use strconv.Quote and strconv.Unquote because json quotes/unquotes differently. +// We implement it here. + +// Top-level methods of json(Enc|Dec)Driver (which are implementations of (en|de)cDriver) +// MUST not call one another.
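Worked examples of the growCap thresholds in helper_internal.go above, for unit == 1 (bytes), where t1, t2, t3 = 4K, 8K, 16K:

// inside package codec
func growCapExamples() {
	_ = growCap(1024, 1, 0)  // <= t1 (4K): 8*1024/4 = 2048, i.e. 2x
	_ = growCap(6000, 1, 0)  // <= t2 (8K): 7*6000/4 = 10500, rounded up to 10560 (1.75x)
	_ = growCap(20000, 1, 0) // > t3 (16K): 5*20000/4 = 25000, rounded up to 25024 (1.25x)
}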
+ +import ( + "bytes" + "encoding/base64" + "math" + "reflect" + "strconv" + "time" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +//-------------------------------- + +var jsonLiterals = [...]byte{ + '"', 't', 'r', 'u', 'e', '"', + '"', 'f', 'a', 'l', 's', 'e', '"', + '"', 'n', 'u', 'l', 'l', '"', +} + +const ( + jsonLitTrueQ = 0 + jsonLitTrue = 1 + jsonLitFalseQ = 6 + jsonLitFalse = 7 + // jsonLitNullQ = 13 + jsonLitNull = 14 +) + +var ( + jsonLiteral4True = jsonLiterals[jsonLitTrue+1 : jsonLitTrue+4] + jsonLiteral4False = jsonLiterals[jsonLitFalse+1 : jsonLitFalse+5] + jsonLiteral4Null = jsonLiterals[jsonLitNull+1 : jsonLitNull+4] +) + +const ( + jsonU4Chk2 = '0' + jsonU4Chk1 = 'a' - 10 + jsonU4Chk0 = 'A' - 10 + + jsonScratchArrayLen = 64 +) + +const ( + // If !jsonValidateSymbols, decoding will be faster, by skipping some checks: + // - If we see first character of null, false or true, + // do not validate subsequent characters. + // - e.g. if we see a n, assume null and skip next 3 characters, + // and do not validate they are ull. + // P.S. Do not expect a significant decoding boost from this. + jsonValidateSymbols = true + + jsonSpacesOrTabsLen = 128 + + jsonAlwaysReturnInternString = false +) + +var ( + // jsonTabs and jsonSpaces are used as caches for indents + jsonTabs, jsonSpaces [jsonSpacesOrTabsLen]byte + + jsonCharHtmlSafeSet bitset256 + jsonCharSafeSet bitset256 + jsonCharWhitespaceSet bitset256 + jsonNumSet bitset256 +) + +func init() { + var i byte + for i = 0; i < jsonSpacesOrTabsLen; i++ { + jsonSpaces[i] = ' ' + jsonTabs[i] = '\t' + } + + // populate the safe values as true: note: ASCII control characters are (0-31) + // jsonCharSafeSet: all true except (0-31) " \ + // jsonCharHtmlSafeSet: all true except (0-31) " \ < > & + for i = 32; i < utf8.RuneSelf; i++ { + switch i { + case '"', '\\': + case '<', '>', '&': + jsonCharSafeSet.set(i) // = true + default: + jsonCharSafeSet.set(i) + jsonCharHtmlSafeSet.set(i) + } + } + for i = 0; i <= utf8.RuneSelf; i++ { + switch i { + case ' ', '\t', '\r', '\n': + jsonCharWhitespaceSet.set(i) + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-': + jsonNumSet.set(i) + } + } +} + +// ---------------- + +type jsonEncDriverTypical struct { + jsonEncDriver +} + +func (e *jsonEncDriverTypical) typical() {} + +func (e *jsonEncDriverTypical) WriteArrayStart(length int) { + e.w.writen1('[') + e.c = containerArrayStart +} + +func (e *jsonEncDriverTypical) WriteArrayElem() { + if e.c != containerArrayStart { + e.w.writen1(',') + } + e.c = containerArrayElem +} + +func (e *jsonEncDriverTypical) WriteArrayEnd() { + e.w.writen1(']') + e.c = containerArrayEnd +} + +func (e *jsonEncDriverTypical) WriteMapStart(length int) { + e.w.writen1('{') + e.c = containerMapStart +} + +func (e *jsonEncDriverTypical) WriteMapElemKey() { + if e.c != containerMapStart { + e.w.writen1(',') + } + e.c = containerMapKey +} + +func (e *jsonEncDriverTypical) WriteMapElemValue() { + e.w.writen1(':') + e.c = containerMapValue +} + +func (e *jsonEncDriverTypical) WriteMapEnd() { + e.w.writen1('}') + e.c = containerMapEnd +} + +func (e *jsonEncDriverTypical) EncodeBool(b bool) { + if b { + e.w.writeb(jsonLiterals[jsonLitTrue : jsonLitTrue+4]) + } else { + e.w.writeb(jsonLiterals[jsonLitFalse : jsonLitFalse+5]) + } +} + +func (e *jsonEncDriverTypical) EncodeFloat64(f float64) { + fmt, prec := jsonFloatStrconvFmtPrec(f) + e.w.writeb(strconv.AppendFloat(e.b[:0], f, fmt, prec, 64)) +} + +func (e *jsonEncDriverTypical) EncodeInt(v int64) { + 
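A worked example of the jsonLiterals indexing above (the Q offsets select the quoted forms):

// jsonLiterals[jsonLitTrueQ : jsonLitTrueQ+6]   == `"true"`
// jsonLiterals[jsonLitTrue : jsonLitTrue+4]     == `true`
// jsonLiterals[jsonLitFalseQ : jsonLitFalseQ+7] == `"false"`
// jsonLiterals[jsonLitFalse : jsonLitFalse+5]   == `false`
// jsonLiterals[jsonLitNull : jsonLitNull+4]     == `null`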
e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) +} + +func (e *jsonEncDriverTypical) EncodeUint(v uint64) { + e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) +} + +func (e *jsonEncDriverTypical) EncodeFloat32(f float32) { + e.EncodeFloat64(float64(f)) +} + +// func (e *jsonEncDriverTypical) atEndOfEncode() { +// if e.tw { +// e.w.writen1(' ') +// } +// } + +// ---------------- + +type jsonEncDriverGeneric struct { + jsonEncDriver + // ds string // indent string + di int8 // indent per + d bool // indenting? + dt bool // indent using tabs + dl uint16 // indent level + ks bool // map key as string + is byte // integer as string + _ byte // padding + _ [2]uint64 // padding +} + +// indent is done as below: +// - newline and indent are added before each mapKey or arrayElem +// - newline and indent are added before each ending, +// except there was no entry (so we can have {} or []) + +func (e *jsonEncDriverGeneric) reset() { + e.jsonEncDriver.reset() + e.d, e.dt, e.dl, e.di = false, false, 0, 0 + if e.h.Indent > 0 { + e.d = true + e.di = int8(e.h.Indent) + } else if e.h.Indent < 0 { + e.d = true + e.dt = true + e.di = int8(-e.h.Indent) + } + e.ks = e.h.MapKeyAsString + e.is = e.h.IntegerAsString +} + +func (e *jsonEncDriverGeneric) WriteArrayStart(length int) { + if e.d { + e.dl++ + } + e.w.writen1('[') + e.c = containerArrayStart +} + +func (e *jsonEncDriverGeneric) WriteArrayElem() { + if e.c != containerArrayStart { + e.w.writen1(',') + } + if e.d { + e.writeIndent() + } + e.c = containerArrayElem +} + +func (e *jsonEncDriverGeneric) WriteArrayEnd() { + if e.d { + e.dl-- + if e.c != containerArrayStart { + e.writeIndent() + } + } + e.w.writen1(']') + e.c = containerArrayEnd +} + +func (e *jsonEncDriverGeneric) WriteMapStart(length int) { + if e.d { + e.dl++ + } + e.w.writen1('{') + e.c = containerMapStart +} + +func (e *jsonEncDriverGeneric) WriteMapElemKey() { + if e.c != containerMapStart { + e.w.writen1(',') + } + if e.d { + e.writeIndent() + } + e.c = containerMapKey +} + +func (e *jsonEncDriverGeneric) WriteMapElemValue() { + if e.d { + e.w.writen2(':', ' ') + } else { + e.w.writen1(':') + } + e.c = containerMapValue +} + +func (e *jsonEncDriverGeneric) WriteMapEnd() { + if e.d { + e.dl-- + if e.c != containerMapStart { + e.writeIndent() + } + } + e.w.writen1('}') + e.c = containerMapEnd +} + +func (e *jsonEncDriverGeneric) writeIndent() { + e.w.writen1('\n') + x := int(e.di) * int(e.dl) + if e.dt { + for x > jsonSpacesOrTabsLen { + e.w.writeb(jsonTabs[:]) + x -= jsonSpacesOrTabsLen + } + e.w.writeb(jsonTabs[:x]) + } else { + for x > jsonSpacesOrTabsLen { + e.w.writeb(jsonSpaces[:]) + x -= jsonSpacesOrTabsLen + } + e.w.writeb(jsonSpaces[:x]) + } +} + +func (e *jsonEncDriverGeneric) EncodeBool(b bool) { + if e.ks && e.c == containerMapKey { + if b { + e.w.writeb(jsonLiterals[jsonLitTrueQ : jsonLitTrueQ+6]) + } else { + e.w.writeb(jsonLiterals[jsonLitFalseQ : jsonLitFalseQ+7]) + } + } else { + if b { + e.w.writeb(jsonLiterals[jsonLitTrue : jsonLitTrue+4]) + } else { + e.w.writeb(jsonLiterals[jsonLitFalse : jsonLitFalse+5]) + } + } +} + +func (e *jsonEncDriverGeneric) EncodeFloat64(f float64) { + // instead of using 'g', specify whether to use 'e' or 'f' + fmt, prec := jsonFloatStrconvFmtPrec(f) + + var blen int + if e.ks && e.c == containerMapKey { + blen = 2 + len(strconv.AppendFloat(e.b[1:1], f, fmt, prec, 64)) + e.b[0] = '"' + e.b[blen-1] = '"' + } else { + blen = len(strconv.AppendFloat(e.b[:0], f, fmt, prec, 64)) + } + e.w.writeb(e.b[:blen]) +} + +func (e *jsonEncDriverGeneric) 
EncodeInt(v int64) { + x := e.is + if x == 'A' || x == 'L' && (v > 1<<53 || v < -(1<<53)) || (e.ks && e.c == containerMapKey) { + blen := 2 + len(strconv.AppendInt(e.b[1:1], v, 10)) + e.b[0] = '"' + e.b[blen-1] = '"' + e.w.writeb(e.b[:blen]) + return + } + e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) +} + +func (e *jsonEncDriverGeneric) EncodeUint(v uint64) { + x := e.is + if x == 'A' || x == 'L' && v > 1<<53 || (e.ks && e.c == containerMapKey) { + blen := 2 + len(strconv.AppendUint(e.b[1:1], v, 10)) + e.b[0] = '"' + e.b[blen-1] = '"' + e.w.writeb(e.b[:blen]) + return + } + e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) +} + +func (e *jsonEncDriverGeneric) EncodeFloat32(f float32) { + // e.encodeFloat(float64(f), 32) + // always encode all floats as IEEE 64-bit floating point. + // It also ensures that we can decode in full precision even if into a float32, + // as what is written is always to float64 precision. + e.EncodeFloat64(float64(f)) +} + +// func (e *jsonEncDriverGeneric) atEndOfEncode() { +// if e.tw { +// if e.d { +// e.w.writen1('\n') +// } else { +// e.w.writen1(' ') +// } +// } +// } + +// -------------------- + +type jsonEncDriver struct { + noBuiltInTypes + e *Encoder + h *JsonHandle + w *encWriterSwitch + se extWrapper + // ---- cpu cache line boundary? + bs []byte // scratch + // ---- cpu cache line boundary? + // scratch: encode time, etc. + // include scratch buffer and padding, but leave space for containerstate + b [jsonScratchArrayLen + 8 + 8 - 1]byte + c containerState + // _ [2]uint64 // padding +} + +func (e *jsonEncDriver) EncodeNil() { + // We always encode nil as just null (never in quotes) + // This allows us to easily decode if a nil in the json stream + // ie if initial token is n. + e.w.writeb(jsonLiterals[jsonLitNull : jsonLitNull+4]) + + // if e.h.MapKeyAsString && e.c == containerMapKey { + // e.w.writeb(jsonLiterals[jsonLitNullQ : jsonLitNullQ+6]) + // } else { + // e.w.writeb(jsonLiterals[jsonLitNull : jsonLitNull+4]) + // } +} + +func (e *jsonEncDriver) EncodeTime(t time.Time) { + // Do NOT use MarshalJSON, as it allocates internally. 
+ // instead, we call AppendFormat directly, using our scratch buffer (e.b) + if t.IsZero() { + e.EncodeNil() + } else { + e.b[0] = '"' + b := t.AppendFormat(e.b[1:1], time.RFC3339Nano) + e.b[len(b)+1] = '"' + e.w.writeb(e.b[:len(b)+2]) + } + // v, err := t.MarshalJSON(); if err != nil { e.e.error(err) } e.w.writeb(v) +} + +func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { + if v := ext.ConvertExt(rv); v == nil { + e.EncodeNil() + } else { + en.encode(v) + } +} + +func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { + // only encodes re.Value (never re.Data) + if re.Value == nil { + e.EncodeNil() + } else { + en.encode(re.Value) + } +} + +func (e *jsonEncDriver) EncodeString(c charEncoding, v string) { + e.quoteStr(v) +} + +func (e *jsonEncDriver) EncodeStringEnc(c charEncoding, v string) { + e.quoteStr(v) +} + +func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + // if encoding raw bytes and RawBytesExt is configured, use it to encode + if v == nil { + e.EncodeNil() + return + } + if c == cRAW { + if e.se.InterfaceExt != nil { + e.EncodeExt(v, 0, &e.se, e.e) + return + } + + slen := base64.StdEncoding.EncodedLen(len(v)) + 2 + if cap(e.bs) >= slen { + e.bs = e.bs[:slen] + } else { + e.bs = make([]byte, slen) + } + e.bs[0] = '"' + base64.StdEncoding.Encode(e.bs[1:], v) + e.bs[slen-1] = '"' + e.w.writeb(e.bs) + } else { + e.quoteStr(stringView(v)) + } +} + +func (e *jsonEncDriver) EncodeStringBytesRaw(v []byte) { + // if encoding raw bytes and RawBytesExt is configured, use it to encode + if v == nil { + e.EncodeNil() + return + } + if e.se.InterfaceExt != nil { + e.EncodeExt(v, 0, &e.se, e.e) + return + } + + slen := base64.StdEncoding.EncodedLen(len(v)) + 2 + if cap(e.bs) >= slen { + e.bs = e.bs[:slen] + } else { + e.bs = make([]byte, slen) + } + e.bs[0] = '"' + base64.StdEncoding.Encode(e.bs[1:], v) + e.bs[slen-1] = '"' + e.w.writeb(e.bs) +} + +func (e *jsonEncDriver) EncodeAsis(v []byte) { + e.w.writeb(v) +} + +func (e *jsonEncDriver) quoteStr(s string) { + // adapted from std pkg encoding/json + const hex = "0123456789abcdef" + w := e.w + htmlasis := e.h.HTMLCharsAsIs + w.writen1('"') + var start int + for i, slen := 0, len(s); i < slen; { + // encode all bytes < 0x20 (except \r, \n). + // also encode < > & to prevent security holes when served to some browsers. + if b := s[i]; b < utf8.RuneSelf { + // if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + // if (htmlasis && jsonCharSafeSet.isset(b)) || jsonCharHtmlSafeSet.isset(b) { + if jsonCharHtmlSafeSet.isset(b) || (htmlasis && jsonCharSafeSet.isset(b)) { + i++ + continue + } + if start < i { + w.writestr(s[start:i]) + } + switch b { + case '\\', '"': + w.writen2('\\', b) + case '\n': + w.writen2('\\', 'n') + case '\r': + w.writen2('\\', 'r') + case '\b': + w.writen2('\\', 'b') + case '\f': + w.writen2('\\', 'f') + case '\t': + w.writen2('\\', 't') + default: + w.writestr(`\u00`) + w.writen2(hex[b>>4], hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + w.writestr(s[start:i]) + } + w.writestr(`\ufffd`) + i += size + start = i + continue + } + // U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR. + // Both technically valid JSON, but bomb on JSONP, so fix here unconditionally. 
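+			// Editor's note: e.g. the rune U+2028 in a Go string is emitted
+			// as the six bytes `\u2028`, never as its raw 3-byte UTF-8 form.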
+ if c == '\u2028' || c == '\u2029' { + if start < i { + w.writestr(s[start:i]) + } + w.writestr(`\u202`) + w.writen1(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + w.writestr(s[start:]) + } + w.writen1('"') +} + +func (e *jsonEncDriver) atEndOfEncode() { + // if e.c == 0 { // scalar written, output space + // e.w.writen1(' ') + // } else if e.h.TermWhitespace { // container written, output new-line + // e.w.writen1('\n') + // } + if e.h.TermWhitespace { + if e.c == 0 { // scalar written, output space + e.w.writen1(' ') + } else { // container written, output new-line + e.w.writen1('\n') + } + } + + // e.c = 0 +} + +type jsonDecDriver struct { + noBuiltInTypes + d *Decoder + h *JsonHandle + r *decReaderSwitch + se extWrapper + + // ---- writable fields during execution --- *try* to keep in sep cache line + + c containerState + // tok is used to store the token read right after skipWhiteSpace. + tok uint8 + fnull bool // found null from appendStringAsBytes + bs []byte // scratch. Initialized from b. Used for parsing strings or numbers. + bstr [8]byte // scratch used for string \UXXX parsing + // ---- cpu cache line boundary? + b [jsonScratchArrayLen]byte // scratch 1, used for parsing strings or numbers or time.Time + b2 [jsonScratchArrayLen]byte // scratch 2, used only for readUntil, decNumBytes + + // _ [3]uint64 // padding + // n jsonNum +} + +// func jsonIsWS(b byte) bool { +// // return b == ' ' || b == '\t' || b == '\r' || b == '\n' +// return jsonCharWhitespaceSet.isset(b) +// } + +func (d *jsonDecDriver) uncacheRead() { + if d.tok != 0 { + d.r.unreadn1() + d.tok = 0 + } +} + +func (d *jsonDecDriver) ReadMapStart() int { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + const xc uint8 = '{' + if d.tok != xc { + d.d.errorf("read map - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerMapStart + return -1 +} + +func (d *jsonDecDriver) ReadArrayStart() int { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + const xc uint8 = '[' + if d.tok != xc { + d.d.errorf("read array - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerArrayStart + return -1 +} + +func (d *jsonDecDriver) CheckBreak() bool { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + return d.tok == '}' || d.tok == ']' +} + +// For the ReadXXX methods below, we could just delegate to helper functions +// readContainerState(c containerState, xc uint8, check bool) +// - ReadArrayElem would become: +// readContainerState(containerArrayElem, ',', d.c != containerArrayStart) +// +// However, until mid-stack inlining comes in go1.11 which supports inlining of +// one-liners, we explicitly write them all 5 out to elide the extra func call. +// +// TODO: For Go 1.11, if inlined, consider consolidating these. 
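+
+// Editor's sketch (illustrative, not upstream code): decoding the stream
+// `[true,null]` drives the methods below in this order, with d.tok caching
+// the first non-whitespace byte between calls:
+//
+//	ReadArrayStart()  // consumes '[';  d.c = containerArrayStart
+//	ReadArrayElem()   // first element: no ',' expected while d.c == containerArrayStart
+//	DecodeBool()      // sees 't', validates the trailing "rue" via readLit4True
+//	ReadArrayElem()   // subsequent element: consumes the ','
+//	TryDecodeAsNil()  // sees 'n', validates "ull", reports a nil value
+//	ReadArrayEnd()    // consumes ']';  d.c = containerArrayEnd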
+ +func (d *jsonDecDriver) ReadArrayElem() { + const xc uint8 = ',' + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.c != containerArrayStart { + if d.tok != xc { + d.d.errorf("read array element - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + } + d.c = containerArrayElem +} + +func (d *jsonDecDriver) ReadArrayEnd() { + const xc uint8 = ']' + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok != xc { + d.d.errorf("read array end - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerArrayEnd +} + +func (d *jsonDecDriver) ReadMapElemKey() { + const xc uint8 = ',' + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.c != containerMapStart { + if d.tok != xc { + d.d.errorf("read map key - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + } + d.c = containerMapKey +} + +func (d *jsonDecDriver) ReadMapElemValue() { + const xc uint8 = ':' + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok != xc { + d.d.errorf("read map value - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerMapValue +} + +func (d *jsonDecDriver) ReadMapEnd() { + const xc uint8 = '}' + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok != xc { + d.d.errorf("read map end - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerMapEnd +} + +// func (d *jsonDecDriver) readLit(length, fromIdx uint8) { +// // length here is always less than 8 (literals are: null, true, false) +// bs := d.r.readx(int(length)) +// d.tok = 0 +// if jsonValidateSymbols && !bytes.Equal(bs, jsonLiterals[fromIdx:fromIdx+length]) { +// d.d.errorf("expecting %s: got %s", jsonLiterals[fromIdx:fromIdx+length], bs) +// } +// } + +func (d *jsonDecDriver) readLit4True() { + bs := d.r.readx(3) + d.tok = 0 + if jsonValidateSymbols && !bytes.Equal(bs, jsonLiteral4True) { + d.d.errorf("expecting %s: got %s", jsonLiteral4True, bs) + } +} + +func (d *jsonDecDriver) readLit4False() { + bs := d.r.readx(4) + d.tok = 0 + if jsonValidateSymbols && !bytes.Equal(bs, jsonLiteral4False) { + d.d.errorf("expecting %s: got %s", jsonLiteral4False, bs) + } +} + +func (d *jsonDecDriver) readLit4Null() { + bs := d.r.readx(3) + d.tok = 0 + if jsonValidateSymbols && !bytes.Equal(bs, jsonLiteral4Null) { + d.d.errorf("expecting %s: got %s", jsonLiteral4Null, bs) + } +} + +func (d *jsonDecDriver) TryDecodeAsNil() bool { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + // we shouldn't try to see if "null" was here, right? 
+ // only the plain string: `null` denotes a nil (ie not quotes) + if d.tok == 'n' { + d.readLit4Null() + return true + } + return false +} + +func (d *jsonDecDriver) DecodeBool() (v bool) { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + fquot := d.c == containerMapKey && d.tok == '"' + if fquot { + d.tok = d.r.readn1() + } + switch d.tok { + case 'f': + d.readLit4False() + // v = false + case 't': + d.readLit4True() + v = true + default: + d.d.errorf("decode bool: got first char %c", d.tok) + // v = false // "unreachable" + } + if fquot { + d.r.readn1() + } + return +} + +func (d *jsonDecDriver) DecodeTime() (t time.Time) { + // read string, and pass the string into json.unmarshal + d.appendStringAsBytes() + if d.fnull { + return + } + t, err := time.Parse(time.RFC3339, stringView(d.bs)) + if err != nil { + d.d.errorv(err) + } + return +} + +func (d *jsonDecDriver) ContainerType() (vt valueType) { + // check container type by checking the first char + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + + // optimize this, so we don't do 4 checks but do one computation. + // return jsonContainerSet[d.tok] + + // ContainerType is mostly called for Map and Array, + // so this conditional is good enough (max 2 checks typically) + if b := d.tok; b == '{' { + return valueTypeMap + } else if b == '[' { + return valueTypeArray + } else if b == 'n' { + return valueTypeNil + } else if b == '"' { + return valueTypeString + } + return valueTypeUnset +} + +func (d *jsonDecDriver) decNumBytes() (bs []byte) { + // stores num bytes in d.bs + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok == '"' { + bs = d.r.readUntil(d.b2[:0], '"') + bs = bs[:len(bs)-1] + } else { + d.r.unreadn1() + bs = d.r.readTo(d.bs[:0], &jsonNumSet) + } + d.tok = 0 + return bs +} + +func (d *jsonDecDriver) DecodeUint64() (u uint64) { + bs := d.decNumBytes() + if len(bs) == 0 { + return + } + n, neg, badsyntax, overflow := jsonParseInteger(bs) + if overflow { + d.d.errorf("overflow parsing unsigned integer: %s", bs) + } else if neg { + d.d.errorf("minus found parsing unsigned integer: %s", bs) + } else if badsyntax { + // fallback: try to decode as float, and cast + n = d.decUint64ViaFloat(stringView(bs)) + } + return n +} + +func (d *jsonDecDriver) DecodeInt64() (i int64) { + const cutoff = uint64(1 << uint(64-1)) + bs := d.decNumBytes() + if len(bs) == 0 { + return + } + n, neg, badsyntax, overflow := jsonParseInteger(bs) + if overflow { + d.d.errorf("overflow parsing integer: %s", bs) + } else if badsyntax { + // d.d.errorf("invalid syntax for integer: %s", bs) + // fallback: try to decode as float, and cast + if neg { + n = d.decUint64ViaFloat(stringView(bs[1:])) + } else { + n = d.decUint64ViaFloat(stringView(bs)) + } + } + if neg { + if n > cutoff { + d.d.errorf("overflow parsing integer: %s", bs) + } + i = -(int64(n)) + } else { + if n >= cutoff { + d.d.errorf("overflow parsing integer: %s", bs) + } + i = int64(n) + } + return +} + +func (d *jsonDecDriver) decUint64ViaFloat(s string) (u uint64) { + if len(s) == 0 { + return + } + f, err := strconv.ParseFloat(s, 64) + if err != nil { + d.d.errorf("invalid syntax for integer: %s", s) + // d.d.errorv(err) + } + fi, ff := math.Modf(f) + if ff > 0 { + d.d.errorf("fractional part found parsing integer: %s", s) + } else if fi > float64(math.MaxUint64) { + d.d.errorf("overflow parsing integer: %s", s) + } + return uint64(fi) +} + +func (d *jsonDecDriver) DecodeFloat64() (f float64) { + bs := d.decNumBytes() + if len(bs) == 0 
{ + return + } + f, err := strconv.ParseFloat(stringView(bs), 64) + if err != nil { + d.d.errorv(err) + } + return +} + +func (d *jsonDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if ext == nil { + re := rv.(*RawExt) + re.Tag = xtag + d.d.decode(&re.Value) + } else { + var v interface{} + d.d.decode(&v) + ext.UpdateExt(rv, v) + } + return +} + +func (d *jsonDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + // if decoding into raw bytes, and the RawBytesExt is configured, use it to decode. + if d.se.InterfaceExt != nil { + bsOut = bs + d.DecodeExt(&bsOut, 0, &d.se) + return + } + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + // check if an "array" of uint8's (see ContainerType for how to infer if an array) + if d.tok == '[' { + bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) + return + } + d.appendStringAsBytes() + // base64 encodes []byte{} as "", and we encode nil []byte as null. + // Consequently, base64 should decode null as a nil []byte, and "" as an empty []byte{}. + // appendStringAsBytes returns a zero-len slice for both, so as not to reset d.bs. + // However, it sets a fnull field to true, so we can check if a null was found. + if len(d.bs) == 0 { + if d.fnull { + return nil + } + return []byte{} + } + bs0 := d.bs + slen := base64.StdEncoding.DecodedLen(len(bs0)) + if slen <= cap(bs) { + bsOut = bs[:slen] + } else if zerocopy && slen <= cap(d.b2) { + bsOut = d.b2[:slen] + } else { + bsOut = make([]byte, slen) + } + slen2, err := base64.StdEncoding.Decode(bsOut, bs0) + if err != nil { + d.d.errorf("error decoding base64 binary '%s': %v", bs0, err) + return nil + } + if slen != slen2 { + bsOut = bsOut[:slen2] + } + return +} + +func (d *jsonDecDriver) DecodeString() (s string) { + d.appendStringAsBytes() + return d.bsToString() +} + +func (d *jsonDecDriver) DecodeStringAsBytes() (s []byte) { + d.appendStringAsBytes() + return d.bs +} + +func (d *jsonDecDriver) appendStringAsBytes() { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + + d.fnull = false + if d.tok != '"' { + // d.d.errorf("expect char '%c' but got char '%c'", '"', d.tok) + // handle non-string scalar: null, true, false or a number + switch d.tok { + case 'n': + d.readLit4Null() + d.bs = d.bs[:0] + d.fnull = true + case 'f': + d.readLit4False() + d.bs = d.bs[:5] + copy(d.bs, "false") + case 't': + d.readLit4True() + d.bs = d.bs[:4] + copy(d.bs, "true") + default: + // try to parse a valid number + bs := d.decNumBytes() + if len(bs) <= cap(d.bs) { + d.bs = d.bs[:len(bs)] + } else { + d.bs = make([]byte, len(bs)) + } + copy(d.bs, bs) + } + return + } + + d.tok = 0 + r := d.r + var cs = r.readUntil(d.b2[:0], '"') + var cslen = uint(len(cs)) + var c uint8 + v := d.bs[:0] + // append on each byte seen can be expensive, so we just + // keep track of where we last read a contiguous set of + // non-special bytes (using cursor variable), + // and when we see a special byte + // e.g. end-of-slice, " or \, + // we will append the full range into the v slice before proceeding + var i, cursor uint + for { + if i == cslen { + v = append(v, cs[cursor:]...) + cs = r.readUntil(d.b2[:0], '"') + cslen = uint(len(cs)) + i, cursor = 0, 0 + } + c = cs[i] + if c == '"' { + v = append(v, cs[cursor:i]...) + break + } + if c != '\\' { + i++ + continue + } + v = append(v, cs[cursor:i]...) 
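+		// Editor's note: a '\\' was hit, and the pending run of plain bytes
+		// was flushed above; now step past the backslash and decode the
+		// escape that follows (a simple escape below, or a \uXXXX sequence
+		// in the 'u' case).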
+ i++ + c = cs[i] + switch c { + case '"', '\\', '/', '\'': + v = append(v, c) + case 'b': + v = append(v, '\b') + case 'f': + v = append(v, '\f') + case 'n': + v = append(v, '\n') + case 'r': + v = append(v, '\r') + case 't': + v = append(v, '\t') + case 'u': + var r rune + var rr uint32 + if cslen < i+4 { + d.d.errorf("need at least 4 more bytes for unicode sequence") + } + var j uint + for _, c = range cs[i+1 : i+5] { // bounds-check-elimination + // best to use explicit if-else + // - not a table, etc which involve memory loads, array lookup with bounds checks, etc + if c >= '0' && c <= '9' { + rr = rr*16 + uint32(c-jsonU4Chk2) + } else if c >= 'a' && c <= 'f' { + rr = rr*16 + uint32(c-jsonU4Chk1) + } else if c >= 'A' && c <= 'F' { + rr = rr*16 + uint32(c-jsonU4Chk0) + } else { + r = unicode.ReplacementChar + i += 4 + goto encode_rune + } + } + r = rune(rr) + i += 4 + if utf16.IsSurrogate(r) { + if len(cs) >= int(i+6) { + var cx = cs[i+1:][:6:6] // [:6] affords bounds-check-elimination + if cx[0] == '\\' && cx[1] == 'u' { + i += 2 + var rr1 uint32 + for j = 2; j < 6; j++ { + c = cx[j] + if c >= '0' && c <= '9' { + rr = rr*16 + uint32(c-jsonU4Chk2) + } else if c >= 'a' && c <= 'f' { + rr = rr*16 + uint32(c-jsonU4Chk1) + } else if c >= 'A' && c <= 'F' { + rr = rr*16 + uint32(c-jsonU4Chk0) + } else { + r = unicode.ReplacementChar + i += 4 + goto encode_rune + } + } + r = utf16.DecodeRune(r, rune(rr1)) + i += 4 + goto encode_rune + } + } + r = unicode.ReplacementChar + } + encode_rune: + w2 := utf8.EncodeRune(d.bstr[:], r) + v = append(v, d.bstr[:w2]...) + default: + d.d.errorf("unsupported escaped value: %c", c) + } + i++ + cursor = i + } + d.bs = v +} + +func (d *jsonDecDriver) nakedNum(z *decNaked, bs []byte) (err error) { + const cutoff = uint64(1 << uint(64-1)) + + var n uint64 + var neg, badsyntax, overflow bool + + if len(bs) == 0 { + if d.h.PreferFloat { + z.v = valueTypeFloat + z.f = 0 + } else if d.h.SignedInteger { + z.v = valueTypeInt + z.i = 0 + } else { + z.v = valueTypeUint + z.u = 0 + } + return + } + if d.h.PreferFloat { + goto F + } + n, neg, badsyntax, overflow = jsonParseInteger(bs) + if badsyntax || overflow { + goto F + } + if neg { + if n > cutoff { + goto F + } + z.v = valueTypeInt + z.i = -(int64(n)) + } else if d.h.SignedInteger { + if n >= cutoff { + goto F + } + z.v = valueTypeInt + z.i = int64(n) + } else { + z.v = valueTypeUint + z.u = n + } + return +F: + z.v = valueTypeFloat + z.f, err = strconv.ParseFloat(stringView(bs), 64) + return +} + +func (d *jsonDecDriver) bsToString() string { + // if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key + if jsonAlwaysReturnInternString || d.c == containerMapKey { + return d.d.string(d.bs) + } + return string(d.bs) +} + +func (d *jsonDecDriver) DecodeNaked() { + z := d.d.naked() + // var decodeFurther bool + + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + switch d.tok { + case 'n': + d.readLit4Null() + z.v = valueTypeNil + case 'f': + d.readLit4False() + z.v = valueTypeBool + z.b = false + case 't': + d.readLit4True() + z.v = valueTypeBool + z.b = true + case '{': + z.v = valueTypeMap // don't consume. kInterfaceNaked will call ReadMapStart + case '[': + z.v = valueTypeArray // don't consume. 
kInterfaceNaked will call ReadArrayStart
+	case '"':
+		// if a string, and MapKeyAsString, then try to decode it as a nil, bool or number first
+		d.appendStringAsBytes()
+		if len(d.bs) > 0 && d.c == containerMapKey && d.h.MapKeyAsString {
+			switch stringView(d.bs) {
+			case "null":
+				z.v = valueTypeNil
+			case "true":
+				z.v = valueTypeBool
+				z.b = true
+			case "false":
+				z.v = valueTypeBool
+				z.b = false
+			default:
+				// check if a number: float, int or uint
+				if err := d.nakedNum(z, d.bs); err != nil {
+					z.v = valueTypeString
+					z.s = d.bsToString()
+				}
+			}
+		} else {
+			z.v = valueTypeString
+			z.s = d.bsToString()
+		}
+	default: // number
+		bs := d.decNumBytes()
+		if len(bs) == 0 {
+			d.d.errorf("decode number from empty string")
+			return
+		}
+		if err := d.nakedNum(z, bs); err != nil {
+			d.d.errorf("decode number from %s: %v", bs, err)
+			return
+		}
+	}
+	// if decodeFurther {
+	// 	d.s.sc.retryRead()
+	// }
+}
+
+//----------------------
+
+// JsonHandle is a handle for the JSON encoding format.
+//
+// Json is comprehensively supported:
+//   - decodes numbers into interface{} as int, uint or float64,
+//     based on how the number looks and some config parameters e.g. PreferFloat, SignedInteger, etc.
+//   - decodes integers from float-formatted numbers e.g. 1.27e+8
+//   - decodes any json value (numbers, bool, etc) from quoted strings
+//   - configurable way to encode/decode []byte;
+//     by default, encodes and decodes []byte using base64 Std Encoding
+//   - UTF-8 support for encoding and decoding
+//
+// It has better performance than the json library in the standard library,
+// by leveraging the performance improvements of the codec library.
+//
+// In addition, it doesn't read more bytes than necessary during a decode, which allows
+// reading multiple values from a stream containing json and non-json content.
+// For example, a user can read a json value, then a cbor value, then a msgpack value,
+// all from the same stream in sequence.
+//
+// Note that, when decoding quoted strings, invalid UTF-8 or invalid UTF-16 surrogate pairs are
+// not treated as an error. Instead, they are replaced by the Unicode replacement character U+FFFD.
+type JsonHandle struct {
+	textEncodingType
+	BasicHandle
+
+	// Indent indicates how a value is encoded.
+	//   - If positive, indent by that number of spaces.
+	//   - If negative, indent by that number of tabs.
+	Indent int8
+
+	// IntegerAsString controls how integers (signed and unsigned) are encoded.
+	//
+	// Per the JSON Spec, JSON numbers are 64-bit floating point numbers.
+	// Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision.
+	// This can be mitigated by configuring how to encode integers.
+	//
+	// IntegerAsString interprets the following values:
+	//   - if 'L', then encode integers > 2^53 as a json string.
+	//   - if 'A', then encode all integers as a json string
+	//     containing the exact integer representation as a decimal.
+	//   - else encode all integers as a json number (default)
+	IntegerAsString byte
+
+	// HTMLCharsAsIs controls how to encode some special characters to html: < > &
+	//
+	// By default, we encode them as \uXXXX
+	// to prevent security holes when served from some browsers.
+	HTMLCharsAsIs bool
+
+	// PreferFloat says that we will default to decoding a number as a float.
+	// If not set, we will examine the characters of the number and decode as an
+	// integer type if it doesn't have any of the characters [.eE].
+ PreferFloat bool + + // TermWhitespace says that we add a whitespace character + // at the end of an encoding. + // + // The whitespace is important, especially if using numbers in a context + // where multiple items are written to a stream. + TermWhitespace bool + + // MapKeyAsString says to encode all map keys as strings. + // + // Use this to enforce strict json output. + // The only caveat is that nil value is ALWAYS written as null (never as "null") + MapKeyAsString bool + + // _ [2]byte // padding + + // Note: below, we store hardly-used items e.g. RawBytesExt is cached in the (en|de)cDriver. + + // RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way. + // If not configured, raw bytes are encoded to/from base64 text. + RawBytesExt InterfaceExt + + _ [2]uint64 // padding +} + +// Name returns the name of the handle: json +func (h *JsonHandle) Name() string { return "json" } +func (h *JsonHandle) hasElemSeparators() bool { return true } +func (h *JsonHandle) typical() bool { + return h.Indent == 0 && !h.MapKeyAsString && h.IntegerAsString != 'A' && h.IntegerAsString != 'L' +} + +type jsonTypical interface { + typical() +} + +func (h *JsonHandle) recreateEncDriver(ed encDriver) (v bool) { + _, v = ed.(jsonTypical) + return v != h.typical() +} + +// SetInterfaceExt sets an extension +func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { + return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext}) +} + +func (h *JsonHandle) newEncDriver(e *Encoder) (ee encDriver) { + var hd *jsonEncDriver + if h.typical() { + var v jsonEncDriverTypical + ee = &v + hd = &v.jsonEncDriver + } else { + var v jsonEncDriverGeneric + ee = &v + hd = &v.jsonEncDriver + } + hd.e, hd.h, hd.bs = e, h, hd.b[:0] + hd.se.BytesExt = bytesExtFailer{} + ee.reset() + return +} + +func (h *JsonHandle) newDecDriver(d *Decoder) decDriver { + // d := jsonDecDriver{r: r.(*bytesDecReader), h: h} + hd := jsonDecDriver{d: d, h: h} + hd.se.BytesExt = bytesExtFailer{} + hd.bs = hd.b[:0] + hd.reset() + return &hd +} + +func (e *jsonEncDriver) reset() { + e.w = e.e.w + e.se.InterfaceExt = e.h.RawBytesExt + if e.bs != nil { + e.bs = e.bs[:0] + } + e.c = 0 +} + +func (d *jsonDecDriver) reset() { + d.r = d.d.r + d.se.InterfaceExt = d.h.RawBytesExt + if d.bs != nil { + d.bs = d.bs[:0] + } + d.c, d.tok = 0, 0 + // d.n.reset() +} + +func jsonFloatStrconvFmtPrec(f float64) (fmt byte, prec int) { + prec = -1 + var abs = math.Abs(f) + if abs != 0 && (abs < 1e-6 || abs >= 1e21) { + fmt = 'e' + } else { + fmt = 'f' + // set prec to 1 iff mod is 0. + // better than using jsonIsFloatBytesB2 to check if a . or E in the float bytes. + // this ensures that every float has an e or .0 in it. + if abs <= 1 { + if abs == 0 || abs == 1 { + prec = 1 + } + } else if _, mod := math.Modf(abs); mod == 0 { + prec = 1 + } + } + return +} + +// custom-fitted version of strconv.Parse(Ui|I)nt. +// Also ensures we don't have to search for .eE to determine if a float or not. +// Note: s CANNOT be a zero-length slice. 
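+// Examples (editor's note, illustrative):
+//	jsonParseInteger([]byte("1234")) => (1234, false, false, false)
+//	jsonParseInteger([]byte("-12"))  => (12, true, false, false)
+//	jsonParseInteger([]byte("1.2"))  => badSyntax=true ('.' is not a digit; callers fall back to float parsing)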
+func jsonParseInteger(s []byte) (n uint64, neg, badSyntax, overflow bool) { + const maxUint64 = (1<<64 - 1) + const cutoff = maxUint64/10 + 1 + + if len(s) == 0 { // bounds-check-elimination + // treat empty string as zero value + // badSyntax = true + return + } + switch s[0] { + case '+': + s = s[1:] + case '-': + s = s[1:] + neg = true + } + for _, c := range s { + if c < '0' || c > '9' { + badSyntax = true + return + } + // unsigned integers don't overflow well on multiplication, so check cutoff here + // e.g. (maxUint64-5)*10 doesn't overflow well ... + if n >= cutoff { + overflow = true + return + } + n *= 10 + n1 := n + uint64(c-'0') + if n1 < n || n1 > maxUint64 { + overflow = true + return + } + n = n1 + } + return +} + +var _ decDriver = (*jsonDecDriver)(nil) +var _ encDriver = (*jsonEncDriverGeneric)(nil) +var _ encDriver = (*jsonEncDriverTypical)(nil) +var _ jsonTypical = (*jsonEncDriverTypical)(nil) diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/mammoth-test.go.tmpl b/vendor/github.com/hashicorp/go-msgpack/v2/codec/mammoth-test.go.tmpl new file mode 100644 index 0000000000000..c598cc73a5eba --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/mammoth-test.go.tmpl @@ -0,0 +1,154 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// Code generated from mammoth-test.go.tmpl - DO NOT EDIT. + +package codec + +import "testing" +import "fmt" +import "reflect" + +// TestMammoth has all the different paths optimized in fast-path +// It has all the primitives, slices and maps. +// +// For each of those types, it has a pointer and a non-pointer field. + +func init() { _ = fmt.Printf } // so we can include fmt as needed + +type TestMammoth struct { + +{{range .Values }}{{if .Primitive }}{{/* +*/}}{{ .MethodNamePfx "F" true }} {{ .Primitive }} +{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }} +{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* +*/}}{{ .MethodNamePfx "F" false }} []{{ .Elem }} +{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }} +{{end}}{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* +*/}}{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }} +{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }} +{{end}}{{end}}{{end}} + +} + +{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* +*/}} type {{ .MethodNamePfx "typMbs" false }} []{{ .Elem }} +func (_ {{ .MethodNamePfx "typMbs" false }}) MapBySlice() { } +{{end}}{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* +*/}} type {{ .MethodNamePfx "typMap" false }} map[{{ .MapKey }}]{{ .Elem }} +{{end}}{{end}}{{end}} + +func doTestMammothSlices(t *testing.T, h Handle) { +{{range $i, $e := .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* +*/}} + var v{{$i}}va [8]{{ .Elem }} + for _, v := range [][]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .Elem }}, {{ zerocmd .Elem }}, {{ zerocmd .Elem }}, {{ nonzerocmd .Elem }} } } { {{/* + // fmt.Printf(">>>> running mammoth slice v{{$i}}: %v\n", v) + // - encode value to some []byte + // - decode into a length-wise-equal []byte + // - check if equal to initial slice + // - encode ptr to the value + // - check if encode bytes are same + // - decode into ptrs to: nil, then 1-elem slice, equal-length, then large len slice + // - decode into non-addressable slice of equal length, then larger len + // - for each decode, compare elem-by-elem to the original 
slice + // - + // - rinse and repeat for a MapBySlice version + // - + */}} + var v{{$i}}v1, v{{$i}}v2 []{{ .Elem }} + v{{$i}}v1 = v + bs{{$i}} := testMarshalErr(v{{$i}}v1, h, t, "enc-slice-v{{$i}}") + if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) } + testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}") + if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) } + testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-slice-v{{$i}}-noaddr") // non-addressable value + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-noaddr") + // ... + bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-slice-v{{$i}}-p") + v{{$i}}v2 = nil + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p") + v{{$i}}va = [8]{{ .Elem }}{} // clear the array + v{{$i}}v2 = v{{$i}}va[:1:1] + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-1") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-1") + v{{$i}}va = [8]{{ .Elem }}{} // clear the array + v{{$i}}v2 = v{{$i}}va[:len(v{{$i}}v1):len(v{{$i}}v1)] + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-len") + v{{$i}}va = [8]{{ .Elem }}{} // clear the array + v{{$i}}v2 = v{{$i}}va[:] + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-cap") + if len(v{{$i}}v1) > 1 { + v{{$i}}va = [8]{{ .Elem }}{} // clear the array + testUnmarshalErr((&v{{$i}}va)[:len(v{{$i}}v1)], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len-noaddr") + testDeepEqualErr(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], t, "equal-slice-v{{$i}}-p-len-noaddr") + v{{$i}}va = [8]{{ .Elem }}{} // clear the array + testUnmarshalErr((&v{{$i}}va)[:], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap-noaddr") + testDeepEqualErr(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], t, "equal-slice-v{{$i}}-p-cap-noaddr") + } + // ... 
+ var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMbs" false }} + v{{$i}}v2 = nil + if v != nil { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) } + v{{$i}}v3 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v1) + v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2) + bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom") + testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom") + testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom") + bs{{$i}} = testMarshalErr(&v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom-p") + v{{$i}}v2 = nil + v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2) + testUnmarshalErr(&v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom-p") + testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom-p") + } +{{end}}{{end}}{{end}} +} + +func doTestMammothMaps(t *testing.T, h Handle) { +{{range $i, $e := .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* +*/}} + for _, v := range []map[{{ .MapKey }}]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .MapKey }}:{{ zerocmd .Elem }} {{if ne "bool" .MapKey}}, {{ nonzerocmd .MapKey }}:{{ nonzerocmd .Elem }} {{end}} } } { + // fmt.Printf(">>>> running mammoth map v{{$i}}: %v\n", v) + var v{{$i}}v1, v{{$i}}v2 map[{{ .MapKey }}]{{ .Elem }} + v{{$i}}v1 = v + bs{{$i}} := testMarshalErr(v{{$i}}v1, h, t, "enc-map-v{{$i}}") + if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map + testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}") + if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map + testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-map-v{{$i}}-noaddr") // decode into non-addressable map value + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-noaddr") + if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p-len") + bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-map-v{{$i}}-p") + v{{$i}}v2 = nil + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-nil") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p-nil") + // ... + if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map + var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMap" false }} + v{{$i}}v3 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v1) + v{{$i}}v4 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v2) + bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-map-v{{$i}}-custom") + testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len") + testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-map-v{{$i}}-p-len") + } +{{end}}{{end}}{{end}} + +} + +func doTestMammothMapsAndSlices(t *testing.T, h Handle) { + doTestMammothSlices(t, h) + doTestMammothMaps(t, h) +} diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/mammoth2-test.go.tmpl b/vendor/github.com/hashicorp/go-msgpack/v2/codec/mammoth2-test.go.tmpl new file mode 100644 index 0000000000000..3b546f3e40b6d --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/mammoth2-test.go.tmpl @@ -0,0 +1,92 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +// Code generated from mammoth2-test.go.tmpl - DO NOT EDIT. + +package codec + +// Increase codecoverage by covering all the codecgen paths, in fast-path and gen-helper.go.... +// +// Add: +// - test file for creating a mammoth generated file as _mammoth_generated.go +// - generate a second mammoth files in a different file: mammoth2_generated_test.go +// - mammoth-test.go.tmpl will do this +// - run codecgen on it, into mammoth2_codecgen_generated_test.go (no build tags) +// - as part of TestMammoth, run it also +// - this will cover all the codecgen, gen-helper, etc in one full run +// - check in mammoth* files into github also +// - then +// +// Now, add some types: +// - some that implement BinaryMarshal, TextMarshal, JSONMarshal, and one that implements none of it +// - create a wrapper type that includes TestMammoth2, with it in slices, and maps, and the custom types +// - this wrapper object is what we work encode/decode (so that the codecgen methods are called) + + +// import "encoding/binary" +import "fmt" + +type TestMammoth2 struct { + +{{range .Values }}{{if .Primitive }}{{/* +*/}}{{ .MethodNamePfx "F" true }} {{ .Primitive }} +{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }} +{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* +*/}}{{ .MethodNamePfx "F" false }} []{{ .Elem }} +{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }} +{{end}}{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* +*/}}{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }} +{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }} +{{end}}{{end}}{{end}} + +} + +// ----------- + +type testMammoth2Binary uint64 +func (x testMammoth2Binary) MarshalBinary() (data []byte, err error) { +data = make([]byte, 8) +bigen.PutUint64(data, uint64(x)) +return +} +func (x *testMammoth2Binary) UnmarshalBinary(data []byte) (err error) { +*x = testMammoth2Binary(bigen.Uint64(data)) +return +} + +type testMammoth2Text uint64 +func (x testMammoth2Text) MarshalText() (data []byte, err error) { +data = []byte(fmt.Sprintf("%b", uint64(x))) +return +} +func (x *testMammoth2Text) UnmarshalText(data []byte) (err error) { +_, err = fmt.Sscanf(string(data), "%b", (*uint64)(x)) +return +} + +type testMammoth2Json uint64 +func (x testMammoth2Json) MarshalJSON() (data []byte, err error) { +data = []byte(fmt.Sprintf("%v", uint64(x))) +return +} +func (x *testMammoth2Json) UnmarshalJSON(data []byte) (err error) { +_, err = fmt.Sscanf(string(data), "%v", (*uint64)(x)) +return +} + +type testMammoth2Basic [4]uint64 + +type TestMammoth2Wrapper struct { + V TestMammoth2 + T testMammoth2Text + B testMammoth2Binary + J testMammoth2Json + C testMammoth2Basic + M map[testMammoth2Basic]TestMammoth2 + L []TestMammoth2 + A [4]int64 +} diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/msgpack.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/msgpack.go new file mode 100644 index 0000000000000..99c8a13fd85f7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/msgpack.go @@ -0,0 +1,1150 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +/* +MSGPACK + +Msgpack-c implementation powers the c, c++, python, ruby, etc libraries. +We need to maintain compatibility with it and how it encodes integer values +without caring about the type. 
+ +For compatibility with behaviour of msgpack-c reference implementation: + - Go intX (>0) and uintX + IS ENCODED AS + msgpack +ve fixnum, unsigned + - Go intX (<0) + IS ENCODED AS + msgpack -ve fixnum, signed +*/ + +package codec + +import ( + "fmt" + "io" + "math" + "net/rpc" + "reflect" + "time" +) + +const ( + mpPosFixNumMin byte = 0x00 + mpPosFixNumMax byte = 0x7f + mpFixMapMin byte = 0x80 + mpFixMapMax byte = 0x8f + mpFixArrayMin byte = 0x90 + mpFixArrayMax byte = 0x9f + mpFixStrMin byte = 0xa0 + mpFixStrMax byte = 0xbf + mpNil byte = 0xc0 + _ byte = 0xc1 + mpFalse byte = 0xc2 + mpTrue byte = 0xc3 + mpFloat byte = 0xca + mpDouble byte = 0xcb + mpUint8 byte = 0xcc + mpUint16 byte = 0xcd + mpUint32 byte = 0xce + mpUint64 byte = 0xcf + mpInt8 byte = 0xd0 + mpInt16 byte = 0xd1 + mpInt32 byte = 0xd2 + mpInt64 byte = 0xd3 + + // extensions below + mpBin8 byte = 0xc4 + mpBin16 byte = 0xc5 + mpBin32 byte = 0xc6 + mpExt8 byte = 0xc7 + mpExt16 byte = 0xc8 + mpExt32 byte = 0xc9 + mpFixExt1 byte = 0xd4 + mpFixExt2 byte = 0xd5 + mpFixExt4 byte = 0xd6 + mpFixExt8 byte = 0xd7 + mpFixExt16 byte = 0xd8 + + mpStr8 byte = 0xd9 // new + mpStr16 byte = 0xda + mpStr32 byte = 0xdb + + mpArray16 byte = 0xdc + mpArray32 byte = 0xdd + + mpMap16 byte = 0xde + mpMap32 byte = 0xdf + + mpNegFixNumMin byte = 0xe0 + mpNegFixNumMax byte = 0xff +) + +var mpTimeExtTag int8 = -1 +var mpTimeExtTagU = uint8(mpTimeExtTag) + +// var mpdesc = map[byte]string{ +// mpPosFixNumMin: "PosFixNumMin", +// mpPosFixNumMax: "PosFixNumMax", +// mpFixMapMin: "FixMapMin", +// mpFixMapMax: "FixMapMax", +// mpFixArrayMin: "FixArrayMin", +// mpFixArrayMax: "FixArrayMax", +// mpFixStrMin: "FixStrMin", +// mpFixStrMax: "FixStrMax", +// mpNil: "Nil", +// mpFalse: "False", +// mpTrue: "True", +// mpFloat: "Float", +// mpDouble: "Double", +// mpUint8: "Uint8", +// mpUint16: "Uint16", +// mpUint32: "Uint32", +// mpUint64: "Uint64", +// mpInt8: "Int8", +// mpInt16: "Int16", +// mpInt32: "Int32", +// mpInt64: "Int64", +// mpBin8: "Bin8", +// mpBin16: "Bin16", +// mpBin32: "Bin32", +// mpExt8: "Ext8", +// mpExt16: "Ext16", +// mpExt32: "Ext32", +// mpFixExt1: "FixExt1", +// mpFixExt2: "FixExt2", +// mpFixExt4: "FixExt4", +// mpFixExt8: "FixExt8", +// mpFixExt16: "FixExt16", +// mpStr8: "Str8", +// mpStr16: "Str16", +// mpStr32: "Str32", +// mpArray16: "Array16", +// mpArray32: "Array32", +// mpMap16: "Map16", +// mpMap32: "Map32", +// mpNegFixNumMin: "NegFixNumMin", +// mpNegFixNumMax: "NegFixNumMax", +// } + +func mpdesc(bd byte) string { + switch bd { + case mpNil: + return "nil" + case mpFalse: + return "false" + case mpTrue: + return "true" + case mpFloat, mpDouble: + return "float" + case mpUint8, mpUint16, mpUint32, mpUint64: + return "uint" + case mpInt8, mpInt16, mpInt32, mpInt64: + return "int" + default: + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: + return "int" + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + return "int" + case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: + return "string|bytes" + case bd == mpBin8, bd == mpBin16, bd == mpBin32: + return "bytes" + case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: + return "array" + case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: + return "map" + case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + return "ext" + default: + return "unknown" + } + } +} + +// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec 
+// that the backend RPC service takes multiple arguments, which have been arranged +// in sequence in the slice. +// +// The Codec then passes it AS-IS to the rpc service (without wrapping it in an +// array of 1 element). +type MsgpackSpecRpcMultiArgs []interface{} + +// A MsgpackContainer type specifies the different types of msgpackContainers. +type msgpackContainerType struct { + fixCutoff uint8 + bFixMin, b8, b16, b32 byte + // hasFixMin, has8, has8Always bool +} + +var ( + msgpackContainerRawLegacy = msgpackContainerType{ + 32, mpFixStrMin, 0, mpStr16, mpStr32, + } + msgpackContainerStr = msgpackContainerType{ + 32, mpFixStrMin, mpStr8, mpStr16, mpStr32, // true, true, false, + } + msgpackContainerBin = msgpackContainerType{ + 0, 0, mpBin8, mpBin16, mpBin32, // false, true, true, + } + msgpackContainerList = msgpackContainerType{ + 16, mpFixArrayMin, 0, mpArray16, mpArray32, // true, false, false, + } + msgpackContainerMap = msgpackContainerType{ + 16, mpFixMapMin, 0, mpMap16, mpMap32, // true, false, false, + } +) + +//--------------------------------------------- + +type msgpackEncDriver struct { + noBuiltInTypes + encDriverNoopContainerWriter + // encNoSeparator + e *Encoder + w *encWriterSwitch + h *MsgpackHandle + x [8]byte + // _ [3]uint64 // padding +} + +func (e *msgpackEncDriver) EncodeNil() { + e.w.writen1(mpNil) +} + +func (e *msgpackEncDriver) EncodeInt(i int64) { + if e.h.PositiveIntUnsigned && i >= 0 { + e.EncodeUint(uint64(i)) + } else if i > math.MaxInt8 { + if i <= math.MaxInt16 { + e.w.writen1(mpInt16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) + } else if i <= math.MaxInt32 { + e.w.writen1(mpInt32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) + } else { + e.w.writen1(mpInt64) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) + } + } else if i >= -32 { + if e.h.NoFixedNum { + e.w.writen2(mpInt8, byte(i)) + } else { + e.w.writen1(byte(i)) + } + } else if i >= math.MinInt8 { + e.w.writen2(mpInt8, byte(i)) + } else if i >= math.MinInt16 { + e.w.writen1(mpInt16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) + } else if i >= math.MinInt32 { + e.w.writen1(mpInt32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) + } else { + e.w.writen1(mpInt64) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) + } +} + +func (e *msgpackEncDriver) EncodeUint(i uint64) { + if i <= math.MaxInt8 { + if e.h.NoFixedNum { + e.w.writen2(mpUint8, byte(i)) + } else { + e.w.writen1(byte(i)) + } + } else if i <= math.MaxUint8 { + e.w.writen2(mpUint8, byte(i)) + } else if i <= math.MaxUint16 { + e.w.writen1(mpUint16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) + } else if i <= math.MaxUint32 { + e.w.writen1(mpUint32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) + } else { + e.w.writen1(mpUint64) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) + } +} + +func (e *msgpackEncDriver) EncodeBool(b bool) { + if b { + e.w.writen1(mpTrue) + } else { + e.w.writen1(mpFalse) + } +} + +func (e *msgpackEncDriver) EncodeFloat32(f float32) { + e.w.writen1(mpFloat) + bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) +} + +func (e *msgpackEncDriver) EncodeFloat64(f float64) { + e.w.writen1(mpDouble) + bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) +} + +func (e *msgpackEncDriver) EncodeTime(t time.Time) { + // use the MarshalBinary format if requested + if e.h.TimeNotBuiltin { + bin, err := t.MarshalBinary() + if err != nil { + return + } + e.EncodeStringBytesRaw(bin) + return + } + if t.IsZero() { + e.EncodeNil() + return + } + t = t.UTC() + sec, nsec := 
t.Unix(), uint64(t.Nanosecond()) + var data64 uint64 + var l = 4 + if sec >= 0 && sec>>34 == 0 { + data64 = (nsec << 34) | uint64(sec) + if data64&0xffffffff00000000 != 0 { + l = 8 + } + } else { + l = 12 + } + if e.h.WriteExt { + e.encodeExtPreamble(mpTimeExtTagU, l) + } else { + e.writeContainerLen(msgpackContainerRawLegacy, l) + } + switch l { + case 4: + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(data64)) + case 8: + bigenHelper{e.x[:8], e.w}.writeUint64(data64) + case 12: + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(nsec)) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(sec)) + } +} + +func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext, _ *Encoder) { + bs := ext.WriteExt(v) + if bs == nil { + e.EncodeNil() + return + } + if e.h.WriteExt { + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) + } else { + e.EncodeStringBytesRaw(bs) + } +} + +func (e *msgpackEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + +func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) { + if l == 1 { + e.w.writen2(mpFixExt1, xtag) + } else if l == 2 { + e.w.writen2(mpFixExt2, xtag) + } else if l == 4 { + e.w.writen2(mpFixExt4, xtag) + } else if l == 8 { + e.w.writen2(mpFixExt8, xtag) + } else if l == 16 { + e.w.writen2(mpFixExt16, xtag) + } else if l < 256 { + e.w.writen2(mpExt8, byte(l)) + e.w.writen1(xtag) + } else if l < 65536 { + e.w.writen1(mpExt16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) + e.w.writen1(xtag) + } else { + e.w.writen1(mpExt32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) + e.w.writen1(xtag) + } +} + +func (e *msgpackEncDriver) WriteArrayStart(length int) { + e.writeContainerLen(msgpackContainerList, length) +} + +func (e *msgpackEncDriver) WriteMapStart(length int) { + e.writeContainerLen(msgpackContainerMap, length) +} + +func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) { + slen := len(s) + if c == cRAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, slen) + } else { + e.writeContainerLen(msgpackContainerRawLegacy, slen) + } + if slen > 0 { + e.w.writestr(s) + } +} + +func (e *msgpackEncDriver) EncodeStringEnc(c charEncoding, s string) { + slen := len(s) + if e.h.WriteExt { + e.writeContainerLen(msgpackContainerStr, slen) + } else { + e.writeContainerLen(msgpackContainerRawLegacy, slen) + } + if slen > 0 { + e.w.writestr(s) + } +} + +func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) { + if bs == nil { + e.EncodeNil() + return + } + slen := len(bs) + if c == cRAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, slen) + } else { + e.writeContainerLen(msgpackContainerRawLegacy, slen) + } + if slen > 0 { + e.w.writeb(bs) + } +} + +func (e *msgpackEncDriver) EncodeStringBytesRaw(bs []byte) { + if bs == nil { + e.EncodeNil() + return + } + slen := len(bs) + if e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, slen) + } else { + e.writeContainerLen(msgpackContainerRawLegacy, slen) + } + if slen > 0 { + e.w.writeb(bs) + } +} + +func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) { + if ct.fixCutoff > 0 && l < int(ct.fixCutoff) { + e.w.writen1(ct.bFixMin | byte(l)) + } else if ct.b8 > 0 && l < 256 { + e.w.writen2(ct.b8, uint8(l)) + } else if l < 65536 { + e.w.writen1(ct.b16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) + } else { + e.w.writen1(ct.b32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) + } +} + +//--------------------------------------------- + 
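+// Editor's sketch (illustrative, not upstream code): wire bytes produced by
+// the encoder above for a few integers, with NoFixedNum=false and
+// PositiveIntUnsigned=false:
+//
+//	EncodeInt(5)    => 0x05            (positive fixnum)
+//	EncodeInt(-3)   => 0xfd            (negative fixnum)
+//	EncodeInt(-200) => 0xd1 0xff 0x38  (mpInt16, big-endian)
+//	EncodeUint(300) => 0xcd 0x01 0x2c  (mpUint16, big-endian)
+//
+// A minimal round trip through the public API (assuming this package's
+// exported NewEncoderBytes/NewDecoderBytes constructors):
+//
+//	var h MsgpackHandle
+//	var b []byte
+//	_ = NewEncoderBytes(&b, &h).Encode(int64(-200)) // b == []byte{0xd1, 0xff, 0x38}
+//	var v int64
+//	_ = NewDecoderBytes(b, &h).Decode(&v) // v == -200
+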
+type msgpackDecDriver struct { + d *Decoder + r *decReaderSwitch + h *MsgpackHandle + // b [scratchByteArrayLen]byte + bd byte + bdRead bool + br bool // bytes reader + noBuiltInTypes + // noStreamingCodec + // decNoSeparator + decDriverNoopContainerReader + // _ [3]uint64 // padding +} + +// Note: This returns either a primitive (int, bool, etc) for non-containers, +// or a containerType, or a specific type denoting nil or extension. +// It is called when a nil interface{} is passed, leaving it up to the DecDriver +// to introspect the stream and decide how best to decode. +// It deciphers the value by looking at the stream first. +func (d *msgpackDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + bd := d.bd + n := d.d.naked() + var decodeFurther bool + + switch bd { + case mpNil: + n.v = valueTypeNil + d.bdRead = false + case mpFalse: + n.v = valueTypeBool + n.b = false + case mpTrue: + n.v = valueTypeBool + n.b = true + + case mpFloat: + n.v = valueTypeFloat + n.f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + case mpDouble: + n.v = valueTypeFloat + n.f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + + case mpUint8: + n.v = valueTypeUint + n.u = uint64(d.r.readn1()) + case mpUint16: + n.v = valueTypeUint + n.u = uint64(bigen.Uint16(d.r.readx(2))) + case mpUint32: + n.v = valueTypeUint + n.u = uint64(bigen.Uint32(d.r.readx(4))) + case mpUint64: + n.v = valueTypeUint + n.u = uint64(bigen.Uint64(d.r.readx(8))) + + case mpInt8: + n.v = valueTypeInt + n.i = int64(int8(d.r.readn1())) + case mpInt16: + n.v = valueTypeInt + n.i = int64(int16(bigen.Uint16(d.r.readx(2)))) + case mpInt32: + n.v = valueTypeInt + n.i = int64(int32(bigen.Uint32(d.r.readx(4)))) + case mpInt64: + n.v = valueTypeInt + n.i = int64(int64(bigen.Uint64(d.r.readx(8)))) + + default: + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: + // positive fixnum (always signed) + n.v = valueTypeInt + n.i = int64(int8(bd)) + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + // negative fixnum + n.v = valueTypeInt + n.i = int64(int8(bd)) + case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: + if d.h.WriteExt || d.h.RawToString { + n.v = valueTypeString + n.s = d.DecodeString() + } else { + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false) + } + case bd == mpBin8, bd == mpBin16, bd == mpBin32: + decNakedReadRawBytes(d, d.d, n, d.h.RawToString) + case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: + n.v = valueTypeArray + decodeFurther = true + case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: + n.v = valueTypeMap + decodeFurther = true + case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + n.v = valueTypeExt + clen := d.readExtLen() + n.u = uint64(d.r.readn1()) + if n.u == uint64(mpTimeExtTagU) { + n.v = valueTypeTime + n.t = d.decodeTime(clen) + } else if d.br { + n.l = d.r.readx(uint(clen)) + } else { + n.l = decByteSlice(d.r, clen, d.d.h.MaxInitLen, d.d.b[:]) + } + default: + d.d.errorf("cannot infer value: %s: Ox%x/%d/%s", msgBadDesc, bd, bd, mpdesc(bd)) + } + } + if !decodeFurther { + d.bdRead = false + } + if n.v == valueTypeUint && d.h.SignedInteger { + n.v = valueTypeInt + n.i = int64(n.u) + } +} + +// int can be decoded from msgpack type: intXXX or uintXXX +func (d *msgpackDecDriver) DecodeInt64() (i int64) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case mpUint8: + i = int64(uint64(d.r.readn1())) + case mpUint16: + i = 
int64(uint64(bigen.Uint16(d.r.readx(2)))) + case mpUint32: + i = int64(uint64(bigen.Uint32(d.r.readx(4)))) + case mpUint64: + i = int64(bigen.Uint64(d.r.readx(8))) + case mpInt8: + i = int64(int8(d.r.readn1())) + case mpInt16: + i = int64(int16(bigen.Uint16(d.r.readx(2)))) + case mpInt32: + i = int64(int32(bigen.Uint32(d.r.readx(4)))) + case mpInt64: + i = int64(bigen.Uint64(d.r.readx(8))) + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + i = int64(int8(d.bd)) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + i = int64(int8(d.bd)) + default: + d.d.errorf("cannot decode signed integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) + return + } + } + d.bdRead = false + return +} + +// uint can be decoded from msgpack type: intXXX or uintXXX +func (d *msgpackDecDriver) DecodeUint64() (ui uint64) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case mpUint8: + ui = uint64(d.r.readn1()) + case mpUint16: + ui = uint64(bigen.Uint16(d.r.readx(2))) + case mpUint32: + ui = uint64(bigen.Uint32(d.r.readx(4))) + case mpUint64: + ui = bigen.Uint64(d.r.readx(8)) + case mpInt8: + if i := int64(int8(d.r.readn1())); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("assigning negative signed value: %v, to unsigned type", i) + return + } + case mpInt16: + if i := int64(int16(bigen.Uint16(d.r.readx(2)))); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("assigning negative signed value: %v, to unsigned type", i) + return + } + case mpInt32: + if i := int64(int32(bigen.Uint32(d.r.readx(4)))); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("assigning negative signed value: %v, to unsigned type", i) + return + } + case mpInt64: + if i := int64(bigen.Uint64(d.r.readx(8))); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("assigning negative signed value: %v, to unsigned type", i) + return + } + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + ui = uint64(d.bd) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + d.d.errorf("assigning negative signed value: %v, to unsigned type", int(d.bd)) + return + default: + d.d.errorf("cannot decode unsigned integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) + return + } + } + d.bdRead = false + return +} + +// float can either be decoded from msgpack type: float, double or intX +func (d *msgpackDecDriver) DecodeFloat64() (f float64) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpFloat { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + } else if d.bd == mpDouble { + f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + } else { + f = float64(d.DecodeInt64()) + } + d.bdRead = false + return +} + +// bool can be decoded from bool, fixnum 0 or 1. 
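+// For example, the single bytes 0xc2 (mpFalse) and 0xc3 (mpTrue) decode to
+// false and true, and the fixnum bytes 0x00 and 0x01 are likewise accepted
+// as false and true.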
+func (d *msgpackDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpFalse || d.bd == 0 { + // b = false + } else if d.bd == mpTrue || d.bd == 1 { + b = true + } else { + d.d.errorf("cannot decode bool: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) + return + } + d.bdRead = false + return +} + +func (d *msgpackDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + + bd := d.bd + var clen int + if bd == mpNil { + d.bdRead = false + return + } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + clen = d.readContainerLen(msgpackContainerBin) // binary + } else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax) { + clen = d.readContainerLen(msgpackContainerStr) // string/raw + } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { + // check if an "array" of uint8's + if zerocopy && len(bs) == 0 { + bs = d.d.b[:] + } + bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) + return + } else { + d.d.errorf("invalid byte descriptor for decoding bytes, got: 0x%x", d.bd) + return + } + + d.bdRead = false + if zerocopy { + if d.br { + return d.r.readx(uint(clen)) + } else if len(bs) == 0 { + bs = d.d.b[:] + } + } + return decByteSlice(d.r, clen, d.h.MaxInitLen, bs) +} + +func (d *msgpackDecDriver) DecodeString() (s string) { + return string(d.DecodeBytes(d.d.b[:], true)) +} + +func (d *msgpackDecDriver) DecodeStringAsBytes() (s []byte) { + return d.DecodeBytes(d.d.b[:], true) +} + +func (d *msgpackDecDriver) readNextBd() { + d.bd = d.r.readn1() + d.bdRead = true +} + +func (d *msgpackDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + +func (d *msgpackDecDriver) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + bd := d.bd + // if bd == mpNil { + // // nil + // } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + // // binary + // } else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax) { + // // string/raw + // } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { + // // array + // } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) { + // // map + // } + if bd == mpNil { + return valueTypeNil + } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + return valueTypeBytes + } else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax) { + if d.h.WriteExt || d.h.RawToString { // UTF-8 string (new spec) + return valueTypeString + } + return valueTypeBytes // raw (old spec) + } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { + return valueTypeArray + } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) { + return valueTypeMap + } + // else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + // } + return valueTypeUnset +} + +func (d *msgpackDecDriver) TryDecodeAsNil() (v bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpNil { + d.bdRead = false + return true + } + return +} + +func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) { + bd := d.bd + if bd == mpNil { + clen = -1 // to represent nil + } else if bd == ct.b8 { + clen = int(d.r.readn1()) + } else if bd == ct.b16 { + clen = int(bigen.Uint16(d.r.readx(2))) + } else if bd == ct.b32 { + clen = 
int(bigen.Uint32(d.r.readx(4))) + } else if (ct.bFixMin & bd) == ct.bFixMin { + clen = int(ct.bFixMin ^ bd) + } else { + d.d.errorf("cannot read container length: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd) + return + } + d.bdRead = false + return +} + +func (d *msgpackDecDriver) ReadMapStart() int { + if !d.bdRead { + d.readNextBd() + } + return d.readContainerLen(msgpackContainerMap) +} + +func (d *msgpackDecDriver) ReadArrayStart() int { + if !d.bdRead { + d.readNextBd() + } + return d.readContainerLen(msgpackContainerList) +} + +func (d *msgpackDecDriver) readExtLen() (clen int) { + switch d.bd { + case mpNil: + clen = -1 // to represent nil + case mpFixExt1: + clen = 1 + case mpFixExt2: + clen = 2 + case mpFixExt4: + clen = 4 + case mpFixExt8: + clen = 8 + case mpFixExt16: + clen = 16 + case mpExt8: + clen = int(d.r.readn1()) + case mpExt16: + clen = int(bigen.Uint16(d.r.readx(2))) + case mpExt32: + clen = int(bigen.Uint32(d.r.readx(4))) + default: + d.d.errorf("decoding ext bytes: found unexpected byte: %x", d.bd) + return + } + return +} + +func (d *msgpackDecDriver) DecodeTime() (t time.Time) { + // decode time from string bytes or ext + if !d.bdRead { + d.readNextBd() + } + bd := d.bd + var clen int + if bd == mpNil { + d.bdRead = false + return + } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + clen = d.readContainerLen(msgpackContainerBin) // binary + } else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax) { + clen = d.readContainerLen(msgpackContainerStr) // string/raw + } else { + // expect to see mpFixExt4,-1 OR mpFixExt8,-1 OR mpExt8,12,-1 + d.bdRead = false + b2 := d.r.readn1() + if d.bd == mpFixExt4 && b2 == mpTimeExtTagU { + clen = 4 + } else if d.bd == mpFixExt8 && b2 == mpTimeExtTagU { + clen = 8 + } else if d.bd == mpExt8 && b2 == 12 && d.r.readn1() == mpTimeExtTagU { + clen = 12 + } else { + d.d.errorf("invalid stream for decoding time as extension: got 0x%x, 0x%x", d.bd, b2) + return + } + } + return d.decodeTime(clen) +} + +func (d *msgpackDecDriver) decodeTime(clen int) (t time.Time) { + bs := d.r.readx(uint(clen)) + + // Decode as a binary marshalled string for compatibility with other versions of go-msgpack. 
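+	// (That is the format produced by time.Time.MarshalBinary, which the
+	// t.UnmarshalBinary call below reads back.)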
+ // time.Time should always be encoded as 16 bytes or fewer in the binary marshalling format, + // so will always fit within the 32 byte max for fixed strings + if d.bd >= mpFixStrMin && d.bd <= mpFixStrMax { + err := t.UnmarshalBinary(bs) + if err == nil { + return + } + // fallthrough on failure + } + + d.bdRead = false + switch clen { + case 4: + t = time.Unix(int64(bigen.Uint32(bs)), 0).UTC() + case 8: + tv := bigen.Uint64(bs) + t = time.Unix(int64(tv&0x00000003ffffffff), int64(tv>>34)).UTC() + case 12: + nsec := bigen.Uint32(bs[:4]) + sec := bigen.Uint64(bs[4:]) + t = time.Unix(int64(sec), int64(nsec)).UTC() + default: + d.d.errorf("invalid bytes for decoding time - expecting string or 4, 8, or 12 bytes, got %d", clen) + } + return +} + +func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if xtag > 0xff { + d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag) + return + } + realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) + realxtag = uint64(realxtag1) + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) + } else { + ext.ReadExt(rv, xbs) + } + return +} + +func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + if !d.bdRead { + d.readNextBd() + } + xbd := d.bd + if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 { + xbs = d.DecodeBytes(nil, true) + } else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 || + (xbd >= mpFixStrMin && xbd <= mpFixStrMax) { + xbs = d.DecodeStringAsBytes() + } else { + clen := d.readExtLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + d.d.errorf("wrong extension tag - got %b, expecting %v", xtag, tag) + return + } + if d.br { + xbs = d.r.readx(uint(clen)) + } else { + xbs = decByteSlice(d.r, clen, d.d.h.MaxInitLen, d.d.b[:]) + } + } + d.bdRead = false + return +} + +//-------------------------------------------------- + +// MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format. +type MsgpackHandle struct { + BasicHandle + + // NoFixedNum says to output all signed integers as 2-bytes, never as 1-byte fixednum. + NoFixedNum bool + + // WriteExt controls whether the new spec is honored. + // + // With WriteExt=true, we can encode configured extensions with extension tags + // and encode string/[]byte/extensions in a way compatible with the new spec + // but incompatible with the old spec. + // + // For compatibility with the old spec, set WriteExt=false. + // + // With WriteExt=false: + // configured extensions are serialized as raw bytes (not msgpack extensions). + // reserved byte descriptors like Str8 and those enabling the new msgpack Binary type + // are not encoded. + WriteExt bool + + // PositiveIntUnsigned says to encode positive integers as unsigned. 
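+	// For example, with PositiveIntUnsigned=true a positive value such as
+	// int64(300) is written with a msgpack uint descriptor (uint16, 0xcd)
+	// rather than the corresponding signed descriptor (int16, 0xd1).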
+ PositiveIntUnsigned bool + + binaryEncodingType + noElemSeparators + + // _ [1]uint64 // padding +} + +// Name returns the name of the handle: msgpack +func (h *MsgpackHandle) Name() string { return "msgpack" } + +// SetBytesExt sets an extension +func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &extWrapper{ext, interfaceExtFailer{}}) +} + +func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver { + return &msgpackEncDriver{e: e, w: e.w, h: h} +} + +func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver { + return &msgpackDecDriver{d: d, h: h, r: d.r, br: d.bytes} +} + +func (e *msgpackEncDriver) reset() { + e.w = e.e.w +} + +func (d *msgpackDecDriver) reset() { + d.r, d.br = d.d.r, d.d.bytes + d.bd, d.bdRead = 0, false +} + +//-------------------------------------------------- + +type msgpackSpecRpcCodec struct { + rpcCodec +} + +// /////////////// Spec RPC Codec /////////////////// +func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { + // WriteRequest can write to both a Go service, and other services that do + // not abide by the 1 argument rule of a Go service. + // We discriminate based on if the body is a MsgpackSpecRpcMultiArgs + var bodyArr []interface{} + if m, ok := body.(MsgpackSpecRpcMultiArgs); ok { + bodyArr = ([]interface{})(m) + } else { + bodyArr = []interface{}{body} + } + r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr} + return c.write(r2, nil, false) +} + +func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { + var moe interface{} + if r.Error != "" { + moe = r.Error + } + if moe != nil && body != nil { + body = nil + } + r2 := []interface{}{1, uint32(r.Seq), moe, body} + return c.write(r2, nil, false) +} + +func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error { + return c.parseCustomHeader(1, &r.Seq, &r.Error) +} + +func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error { + return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod) +} + +func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error { + if body == nil { // read and discard + return c.read(nil) + } + bodyArr := []interface{}{body} + return c.read(&bodyArr) +} + +func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) { + if cls := c.cls.load(); cls.closed { + return io.EOF + } + + // We read the response header by hand + // so that the body can be decoded on its own from the stream at a later time. + + const fia byte = 0x94 //four item array descriptor value + // Not sure why the panic of EOF is swallowed above. + // if bs1 := c.dec.r.readn1(); bs1 != fia { + // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. 
Received %v", fia, bs1) + // return + // } + var ba [1]byte + var n int + for { + n, err = c.r.Read(ba[:]) + if err != nil { + return + } + if n == 1 { + break + } + } + + var b = ba[0] + if b != fia { + err = fmt.Errorf("not array - %s %x/%s", msgBadDesc, b, mpdesc(b)) + } else { + err = c.read(&b) + if err == nil { + if b != expectTypeByte { + err = fmt.Errorf("%s - expecting %v but got %x/%s", + msgBadDesc, expectTypeByte, b, mpdesc(b)) + } else { + err = c.read(msgid) + if err == nil { + err = c.read(methodOrError) + } + } + } + } + return +} + +//-------------------------------------------------- + +// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol +// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md +type msgpackSpecRpc struct{} + +// MsgpackSpecRpc implements Rpc using the communication protocol defined in +// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md . +// +// See GoRpc documentation, for information on buffering for better performance. +var MsgpackSpecRpc msgpackSpecRpc + +func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +var _ decDriver = (*msgpackDecDriver)(nil) +var _ encDriver = (*msgpackEncDriver)(nil) diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/rpc.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/rpc.go new file mode 100644 index 0000000000000..3fa9f547aec06 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/rpc.go @@ -0,0 +1,227 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "bufio" + "errors" + "io" + "net/rpc" +) + +var errRpcJsonNeedsTermWhitespace = errors.New("rpc requires JsonHandle with TermWhitespace=true") + +// Rpc provides a rpc Server or Client Codec for rpc communication. +type Rpc interface { + ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec + ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec +} + +// RPCOptions holds options specific to rpc functionality +type RPCOptions struct { + // RPCNoBuffer configures whether we attempt to buffer reads and writes during RPC calls. + // + // Set RPCNoBuffer=true to turn buffering off. + // Buffering can still be done if buffered connections are passed in, or + // buffering is configured on the handle. + RPCNoBuffer bool +} + +// rpcCodec defines the struct members and common methods. +type rpcCodec struct { + c io.Closer + r io.Reader + w io.Writer + f ioFlusher + + dec *Decoder + enc *Encoder + // bw *bufio.Writer + // br *bufio.Reader + h Handle + + cls atomicClsErr +} + +func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec { + // return newRPCCodec2(bufio.NewReader(conn), bufio.NewWriter(conn), conn, h) + return newRPCCodec2(conn, conn, conn, h) +} + +func newRPCCodec2(r io.Reader, w io.Writer, c io.Closer, h Handle) rpcCodec { + // defensive: ensure that jsonH has TermWhitespace turned on. + if jsonH, ok := h.(*JsonHandle); ok && !jsonH.TermWhitespace { + panic(errRpcJsonNeedsTermWhitespace) + } + // always ensure that we use a flusher, and always flush what was written to the connection. + // we lose nothing by using a buffered writer internally. 
+ f, ok := w.(ioFlusher) + bh := basicHandle(h) + if !bh.RPCNoBuffer { + if bh.WriterBufferSize <= 0 { + if !ok { + bw := bufio.NewWriter(w) + f, w = bw, bw + } + } + if bh.ReaderBufferSize <= 0 { + if _, ok = w.(ioPeeker); !ok { + if _, ok = w.(ioBuffered); !ok { + br := bufio.NewReader(r) + r = br + } + } + } + } + return rpcCodec{ + c: c, + w: w, + r: r, + f: f, + h: h, + enc: NewEncoder(w, h), + dec: NewDecoder(r, h), + } +} + +func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2 bool) (err error) { + if c.c != nil { + cls := c.cls.load() + if cls.closed { + return cls.errClosed + } + } + err = c.enc.Encode(obj1) + if err == nil { + if writeObj2 { + err = c.enc.Encode(obj2) + } + // if err == nil && c.f != nil { + // err = c.f.Flush() + // } + } + if c.f != nil { + if err == nil { + err = c.f.Flush() + } else { + _ = c.f.Flush() // swallow flush error, so we maintain prior error on write + } + } + return +} + +func (c *rpcCodec) swallow(err *error) { + defer panicToErr(c.dec, err) + c.dec.swallow() +} + +func (c *rpcCodec) read(obj interface{}) (err error) { + if c.c != nil { + cls := c.cls.load() + if cls.closed { + return cls.errClosed + } + } + //If nil is passed in, we should read and discard + if obj == nil { + // var obj2 interface{} + // return c.dec.Decode(&obj2) + c.swallow(&err) + return + } + return c.dec.Decode(obj) +} + +func (c *rpcCodec) Close() error { + if c.c == nil { + return nil + } + cls := c.cls.load() + if cls.closed { + return cls.errClosed + } + cls.errClosed = c.c.Close() + cls.closed = true + c.cls.store(cls) + return cls.errClosed +} + +func (c *rpcCodec) ReadResponseBody(body interface{}) error { + return c.read(body) +} + +// ------------------------------------- + +type goRpcCodec struct { + rpcCodec +} + +func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { + return c.write(r, body, true) +} + +func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { + return c.write(r, body, true) +} + +func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error { + return c.read(r) +} + +func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error { + return c.read(r) +} + +func (c *goRpcCodec) ReadRequestBody(body interface{}) error { + return c.read(body) +} + +// ------------------------------------- + +// goRpc is the implementation of Rpc that uses the communication protocol +// as defined in net/rpc package. +type goRpc struct{} + +// GoRpc implements Rpc using the communication protocol defined in net/rpc package. +// +// Note: network connection (from net.Dial, of type io.ReadWriteCloser) is not buffered. +// +// For performance, you should configure WriterBufferSize and ReaderBufferSize on the handle. +// This ensures we use an adequate buffer during reading and writing. +// If not configured, we will internally initialize and use a buffer during reads and writes. +// This can be turned off via the RPCNoBuffer option on the Handle. 
+// +// var handle codec.JsonHandle +// handle.RPCNoBuffer = true // turns off attempt by rpc module to initialize a buffer +// +// Example 1: one way of configuring buffering explicitly: +// +// var handle codec.JsonHandle // codec handle +// handle.ReaderBufferSize = 1024 +// handle.WriterBufferSize = 1024 +// var conn io.ReadWriteCloser // connection got from a socket +// var serverCodec = GoRpc.ServerCodec(conn, handle) +// var clientCodec = GoRpc.ClientCodec(conn, handle) +// +// Example 2: you can also explicitly create a buffered connection yourself, +// and not worry about configuring the buffer sizes in the Handle. +// +// var handle codec.Handle // codec handle +// var conn io.ReadWriteCloser // connection got from a socket +// var bufconn = struct { // bufconn here is a buffered io.ReadWriteCloser +// io.Closer +// *bufio.Reader +// *bufio.Writer +// }{conn, bufio.NewReader(conn), bufio.NewWriter(conn)} +// var serverCodec = GoRpc.ServerCodec(bufconn, handle) +// var clientCodec = GoRpc.ClientCodec(bufconn, handle) +var GoRpc goRpc + +func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { + return &goRpcCodec{newRPCCodec(conn, h)} +} + +func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { + return &goRpcCodec{newRPCCodec(conn, h)} +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py b/vendor/github.com/hashicorp/go-msgpack/v2/codec/test.py similarity index 64% rename from vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py rename to vendor/github.com/hashicorp/go-msgpack/v2/codec/test.py index e933838c56a89..8418fee4d1eb6 100644 --- a/vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/test.py @@ -4,6 +4,13 @@ # A Test calls this internally to create the golden files # So it can process them (so we don't have to checkin the files). 
+# Ensure msgpack-python is installed first, using: +# sudo apt-get install python-dev +# sudo apt-get install python-pip +# pip install --user msgpack-python msgpack-rpc-python + +# Ensure all "string" keys are utf strings (else encoded as bytes) + import msgpack, msgpackrpc, sys, os, threading def get_test_data_list(): @@ -21,54 +28,59 @@ def get_test_data_list(): -3232.0, -6464646464.0, 3232.0, + 6464.0, 6464646464.0, False, True, + u"null", None, - "someday", - "", - "bytestring", + u"some&day>some 0 @@ -80,17 +92,17 @@ def myStopRpcServer(): server.start() def doRpcClientToPythonSvc(port): - address = msgpackrpc.Address('localhost', port) + address = msgpackrpc.Address('127.0.0.1', port) client = msgpackrpc.Client(address, unpack_encoding='utf-8') - print client.call("Echo123", "A1", "B2", "C3") - print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) + print(client.call("Echo123", "A1", "B2", "C3")) + print(client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})) def doRpcClientToGoSvc(port): # print ">>>> port: ", port, " <<<<<" - address = msgpackrpc.Address('localhost', port) + address = msgpackrpc.Address('127.0.0.1', port) client = msgpackrpc.Client(address, unpack_encoding='utf-8') - print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]) - print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) + print(client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])) + print(client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})) def doMain(args): if len(args) == 2 and args[0] == "testdata": @@ -102,7 +114,7 @@ def doMain(args): elif len(args) == 2 and args[0] == "rpc-client-go-service": doRpcClientToGoSvc(int(args[1])) else: - print("Usage: msgpack_test.py " + + print("Usage: test.py " + "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...") if __name__ == "__main__": diff --git a/vendor/github.com/hashicorp/memberlist/.go-version b/vendor/github.com/hashicorp/memberlist/.go-version new file mode 100644 index 0000000000000..5fb5a6b4f547c --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/.go-version @@ -0,0 +1 @@ +1.20 diff --git a/vendor/github.com/hashicorp/memberlist/.golangci.yml b/vendor/github.com/hashicorp/memberlist/.golangci.yml new file mode 100644 index 0000000000000..daf9298e9679d --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/.golangci.yml @@ -0,0 +1,11 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +version: "2" +issues: + max-issues-per-linter: 0 + max-same-issues: 0 + +run: + timeout: 10m + concurrency: 4 diff --git a/vendor/github.com/hashicorp/memberlist/CHANGELOG.md b/vendor/github.com/hashicorp/memberlist/CHANGELOG.md new file mode 100644 index 0000000000000..a0b285324bd8b --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/CHANGELOG.md @@ -0,0 +1,9 @@ +## Unreleased + +### Improvements + +### Changes + +### Fixed + +### Security diff --git a/vendor/github.com/hashicorp/memberlist/LICENSE b/vendor/github.com/hashicorp/memberlist/LICENSE index c33dcc7c928c6..c72625e4cc88b 100644 --- a/vendor/github.com/hashicorp/memberlist/LICENSE +++ b/vendor/github.com/hashicorp/memberlist/LICENSE @@ -1,3 +1,5 @@ +Copyright (c) 2013 HashiCorp, Inc. + Mozilla Public License, version 2.0 1. 
Definitions
diff --git a/vendor/github.com/hashicorp/memberlist/README.md b/vendor/github.com/hashicorp/memberlist/README.md
index 6a2caa30e06f5..5aa5bdb676574 100644
--- a/vendor/github.com/hashicorp/memberlist/README.md
+++ b/vendor/github.com/hashicorp/memberlist/README.md
@@ -71,3 +71,28 @@ convergence rate.
 For details on all of these extensions, please read our paper
 "[Lifeguard : SWIM-ing with Situational Awareness](https://arxiv.org/abs/1707.00788)", along
 with the memberlist source. We welcome any questions related to the protocol on our issue tracker.
+
+## Metrics Emission and Compatibility
+
+This library can emit metrics using either `github.com/armon/go-metrics` or `github.com/hashicorp/go-metrics`. Choosing between the libraries is controlled via build tags.
+
+**Build Tags**
+* `armonmetrics` - Using this tag will cause metrics to be routed to `armon/go-metrics`
+* `hashicorpmetrics` - Using this tag will cause all metrics to be routed to `hashicorp/go-metrics`
+
+If no build tag is specified, the default behavior is to use `armon/go-metrics`.
+
+**Deprecating `armon/go-metrics`**
+
+Emitting metrics to `armon/go-metrics` is officially deprecated. Usage of `armon/go-metrics` will remain the default until mid-2025, with opt-in support continuing to the end of 2025.
+
+**Migration**
+To migrate an application currently using the older `armon/go-metrics` to `hashicorp/go-metrics`, do the following:
+
+1. Upgrade libraries using `armon/go-metrics` to consume `hashicorp/go-metrics/compat` instead. This should involve only changing import statements. All repositories in the `hashicorp` namespace are being updated in this way.
+2. Update an application's library dependencies to those that have the compatibility layer configured.
+3. Update the application to use `hashicorp/go-metrics` for configuring metrics export instead of `armon/go-metrics`:
+   * Replace all application imports of `github.com/armon/go-metrics` with `github.com/hashicorp/go-metrics`
+   * Instrument your build system to build with the `hashicorpmetrics` tag.
+
+Once the default behavior changes to `hashicorp/go-metrics` (mid-2025), you can drop the `hashicorpmetrics` build tag.
diff --git a/vendor/github.com/hashicorp/memberlist/alive_delegate.go b/vendor/github.com/hashicorp/memberlist/alive_delegate.go
index 615f4a90a5909..7300e7be45867 100644
--- a/vendor/github.com/hashicorp/memberlist/alive_delegate.go
+++ b/vendor/github.com/hashicorp/memberlist/alive_delegate.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package memberlist
 
 // AliveDelegate is used to involve a client in processing
diff --git a/vendor/github.com/hashicorp/memberlist/awareness.go b/vendor/github.com/hashicorp/memberlist/awareness.go
index ea95c75388009..c1b8786fbcba2 100644
--- a/vendor/github.com/hashicorp/memberlist/awareness.go
+++ b/vendor/github.com/hashicorp/memberlist/awareness.go
@@ -1,10 +1,13 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package memberlist
 
 import (
 	"sync"
 	"time"
 
-	"github.com/armon/go-metrics"
+	"github.com/hashicorp/go-metrics/compat"
 )
 
 // awareness manages a simple metric for tracking the estimated health of the
@@ -21,13 +24,17 @@ type awareness struct {
 	// score is the current awareness score. Lower values are healthier and
 	// zero is the minimum value.
 	score int
+
+	// metricLabels is the slice of labels to put on all emitted metrics
+	metricLabels []metrics.Label
 }
 
 // newAwareness returns a new awareness object.
-func newAwareness(max int) *awareness { +func newAwareness(max int, metricLabels []metrics.Label) *awareness { return &awareness{ - max: max, - score: 0, + max: max, + score: 0, + metricLabels: metricLabels, } } @@ -47,7 +54,7 @@ func (a *awareness) ApplyDelta(delta int) { a.Unlock() if initial != final { - metrics.SetGauge([]string{"memberlist", "health", "score"}, float32(final)) + metrics.SetGaugeWithLabels([]string{"memberlist", "health", "score"}, float32(final), a.metricLabels) } } diff --git a/vendor/github.com/hashicorp/memberlist/broadcast.go b/vendor/github.com/hashicorp/memberlist/broadcast.go index d07d41bb69d91..8dc93c2464d0a 100644 --- a/vendor/github.com/hashicorp/memberlist/broadcast.go +++ b/vendor/github.com/hashicorp/memberlist/broadcast.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package memberlist /* @@ -55,7 +58,7 @@ func (m *Memberlist) encodeAndBroadcast(node string, msgType messageType, msg in // and notifies the given channel when transmission is finished. Fails // silently if there is an encoding error. func (m *Memberlist) encodeBroadcastNotify(node string, msgType messageType, msg interface{}, notify chan struct{}) { - buf, err := encode(msgType, msg) + buf, err := encode(msgType, msg, m.config.MsgpackUseNewTimeFormat) if err != nil { m.logger.Printf("[ERR] memberlist: Failed to encode message for broadcast: %s", err) } else { diff --git a/vendor/github.com/hashicorp/memberlist/config.go b/vendor/github.com/hashicorp/memberlist/config.go index d7fe4c37b059a..04431b2428f75 100644 --- a/vendor/github.com/hashicorp/memberlist/config.go +++ b/vendor/github.com/hashicorp/memberlist/config.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package memberlist import ( @@ -9,7 +12,8 @@ import ( "strings" "time" - multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-metrics/compat" + "github.com/hashicorp/go-multierror" ) type Config struct { @@ -114,6 +118,12 @@ type Config struct { // usage. PushPullInterval time.Duration + // PushPullNodes is the number of random nodes to perform complete state + // syncs with per PushPullInterval. Increasing this number will increase + // convergence speeds across larger clusters at the expense of increased + // bandwidth usage. Setting this to 0 will use the default of 1. + PushPullNodes int + // ProbeInterval and ProbeTimeout are used to configure probing // behavior for memberlist. // @@ -206,6 +216,7 @@ type Config struct { Merge MergeDelegate Ping PingDelegate Alive AliveDelegate + NodeSelection NodeSelectionDelegate // DNSConfigPath points to the system's DNS config file, usually located // at /etc/resolv.conf. It can be overridden via config for easier testing. @@ -244,10 +255,24 @@ type Config struct { // RequireNodeNames controls if the name of a node is required when sending // a message to that node. RequireNodeNames bool + // CIDRsAllowed If nil, allow any connection (default), otherwise specify all networks // allowed to connect (you must specify IPv6/IPv4 separately) // Using [] will block all connections. CIDRsAllowed []net.IPNet + + // MetricLabels is a map of optional labels to apply to all metrics emitted. + MetricLabels []metrics.Label + + // QueueCheckInterval is the interval at which we check the message + // queue to apply the warning and max depth. 
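+	// Each time the check runs, the current queue depth is also emitted as a
+	// sample of the "memberlist.queue.broadcasts" metric (see
+	// checkBroadcastQueueDepth in memberlist.go).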
+ QueueCheckInterval time.Duration + + // MsgpackUseNewTimeFormat when set to true, force the underlying msgpack + // codec to use the new format of time.Time when encoding (used in + // go-msgpack v1.1.5 by default). Decoding is not affected, as all + // go-msgpack v2.1.0+ decoders know how to decode both formats. + MsgpackUseNewTimeFormat bool } // ParseCIDRs return a possible empty list of all Network that have been parsed @@ -296,6 +321,7 @@ func DefaultLANConfig() *Config { SuspicionMult: 4, // Suspect a node for 4 * log(N+1) * Interval SuspicionMaxTimeoutMult: 6, // For 10k nodes this will give a max timeout of 120 seconds PushPullInterval: 30 * time.Second, // Low frequency + PushPullNodes: 1, // Push/pull with a single node ProbeTimeout: 500 * time.Millisecond, // Reasonable RTT time for LAN ProbeInterval: 1 * time.Second, // Failure check every second DisableTcpPings: false, // TCP pings are safe, even with mixed versions @@ -317,6 +343,8 @@ func DefaultLANConfig() *Config { HandoffQueueDepth: 1024, UDPBufferSize: 1400, CIDRsAllowed: nil, // same as allow all + + QueueCheckInterval: 30 * time.Second, } } @@ -328,6 +356,7 @@ func DefaultWANConfig() *Config { conf.TCPTimeout = 30 * time.Second conf.SuspicionMult = 6 conf.PushPullInterval = 60 * time.Second + conf.PushPullNodes = 1 conf.ProbeTimeout = 3 * time.Second conf.ProbeInterval = 5 * time.Second conf.GossipNodes = 4 // Gossip less frequently, but to an additional node @@ -364,6 +393,7 @@ func DefaultLocalConfig() *Config { conf.RetransmitMult = 2 conf.SuspicionMult = 3 conf.PushPullInterval = 15 * time.Second + conf.PushPullNodes = 1 conf.ProbeTimeout = 200 * time.Millisecond conf.ProbeInterval = time.Second conf.GossipInterval = 100 * time.Millisecond diff --git a/vendor/github.com/hashicorp/memberlist/conflict_delegate.go b/vendor/github.com/hashicorp/memberlist/conflict_delegate.go index f52b136ebad5f..4fae51198fd69 100644 --- a/vendor/github.com/hashicorp/memberlist/conflict_delegate.go +++ b/vendor/github.com/hashicorp/memberlist/conflict_delegate.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package memberlist // ConflictDelegate is a used to inform a client that diff --git a/vendor/github.com/hashicorp/memberlist/delegate.go b/vendor/github.com/hashicorp/memberlist/delegate.go index 55154889216b7..c7e191c1ceb19 100644 --- a/vendor/github.com/hashicorp/memberlist/delegate.go +++ b/vendor/github.com/hashicorp/memberlist/delegate.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package memberlist // Delegate is the interface that clients must implement if they want to hook diff --git a/vendor/github.com/hashicorp/memberlist/event_delegate.go b/vendor/github.com/hashicorp/memberlist/event_delegate.go index 352f98b43e700..20dd8a3991c2e 100644 --- a/vendor/github.com/hashicorp/memberlist/event_delegate.go +++ b/vendor/github.com/hashicorp/memberlist/event_delegate.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package memberlist // EventDelegate is a simpler delegate that is used only to receive diff --git a/vendor/github.com/hashicorp/memberlist/keyring.go b/vendor/github.com/hashicorp/memberlist/keyring.go index a2774a0ce0851..163e7cde10b7f 100644 --- a/vendor/github.com/hashicorp/memberlist/keyring.go +++ b/vendor/github.com/hashicorp/memberlist/keyring.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package memberlist import ( @@ -43,7 +46,7 @@ func NewKeyring(keys [][]byte, primaryKey []byte) (*Keyring, error) { if len(keys) > 0 || len(primaryKey) > 0 { if len(primaryKey) == 0 { - return nil, fmt.Errorf("Empty primary key not allowed") + return nil, fmt.Errorf("empty primary key not allowed") } if err := keyring.AddKey(primaryKey); err != nil { return nil, err @@ -105,14 +108,14 @@ func (k *Keyring) UseKey(key []byte) error { return nil } } - return fmt.Errorf("Requested key is not in the keyring") + return fmt.Errorf("requested key is not in the keyring") } // RemoveKey drops a key from the keyring. This will return an error if the key // requested for removal is currently at position 0 (primary key). func (k *Keyring) RemoveKey(key []byte) error { if bytes.Equal(key, k.keys[0]) { - return fmt.Errorf("Removing the primary key is not allowed") + return fmt.Errorf("removing the primary key is not allowed") } for i, installedKey := range k.keys { if bytes.Equal(key, installedKey) { diff --git a/vendor/github.com/hashicorp/memberlist/label.go b/vendor/github.com/hashicorp/memberlist/label.go index bbe0163ab64b1..e3dd7f7d16c62 100644 --- a/vendor/github.com/hashicorp/memberlist/label.go +++ b/vendor/github.com/hashicorp/memberlist/label.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package memberlist import ( diff --git a/vendor/github.com/hashicorp/memberlist/logging.go b/vendor/github.com/hashicorp/memberlist/logging.go index 2ca2bab4e3402..f9d5f0fb2132f 100644 --- a/vendor/github.com/hashicorp/memberlist/logging.go +++ b/vendor/github.com/hashicorp/memberlist/logging.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package memberlist import ( diff --git a/vendor/github.com/hashicorp/memberlist/memberlist.go b/vendor/github.com/hashicorp/memberlist/memberlist.go index cab6db69fd4c8..9e8f9c4d3503a 100644 --- a/vendor/github.com/hashicorp/memberlist/memberlist.go +++ b/vendor/github.com/hashicorp/memberlist/memberlist.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + /* memberlist is a library that manages cluster membership and member failure detection using a gossip based protocol. @@ -27,8 +30,9 @@ import ( "sync/atomic" "time" - multierror "github.com/hashicorp/go-multierror" - sockaddr "github.com/hashicorp/go-sockaddr" + metrics "github.com/hashicorp/go-metrics/compat" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-sockaddr" "github.com/miekg/dns" ) @@ -61,8 +65,8 @@ type Memberlist struct { msgQueueLock sync.Mutex nodeLock sync.RWMutex - nodes []*nodeState // Known nodes - nodeMap map[string]*nodeState // Maps Node.Name -> NodeState + nodes []*NodeState // Known nodes + nodeMap map[string]*NodeState // Maps Node.Name -> NodeState nodeTimers map[string]*suspicion // Maps Node.Name -> suspicion timer awareness *awareness @@ -77,6 +81,9 @@ type Memberlist struct { broadcasts *TransmitLimitedQueue logger *log.Logger + + // metricLabels is the slice of labels to put on all emitted metrics + metricLabels []metrics.Label } // BuildVsnArray creates the array of Vsn @@ -92,10 +99,10 @@ func (conf *Config) BuildVsnArray() []uint8 { // Does not schedule execution of background maintenance. func newMemberlist(conf *Config) (*Memberlist, error) { if conf.ProtocolVersion < ProtocolVersionMin { - return nil, fmt.Errorf("Protocol version '%d' too low. 
Must be in range: [%d, %d]", + return nil, fmt.Errorf("protocol version '%d' too low. Must be in range: [%d, %d]", conf.ProtocolVersion, ProtocolVersionMin, ProtocolVersionMax) } else if conf.ProtocolVersion > ProtocolVersionMax { - return nil, fmt.Errorf("Protocol version '%d' too high. Must be in range: [%d, %d]", + return nil, fmt.Errorf("protocol version '%d' too high. Must be in range: [%d, %d]", conf.ProtocolVersion, ProtocolVersionMin, ProtocolVersionMax) } @@ -117,7 +124,7 @@ func newMemberlist(conf *Config) (*Memberlist, error) { } if conf.LogOutput != nil && conf.Logger != nil { - return nil, fmt.Errorf("Cannot specify both LogOutput and Logger. Please choose a single log configuration setting.") + return nil, fmt.Errorf("cannot specify both LogOutput and Logger; please choose a single log configuration setting") } logDest := conf.LogOutput @@ -135,9 +142,10 @@ func newMemberlist(conf *Config) (*Memberlist, error) { transport := conf.Transport if transport == nil { nc := &NetTransportConfig{ - BindAddrs: []string{conf.BindAddr}, - BindPort: conf.BindPort, - Logger: logger, + BindAddrs: []string{conf.BindAddr}, + BindPort: conf.BindPort, + Logger: logger, + MetricLabels: conf.MetricLabels, } // See comment below for details about the retry in here. @@ -170,7 +178,7 @@ func newMemberlist(conf *Config) (*Memberlist, error) { nt, err := makeNetRetry(limit) if err != nil { - return nil, fmt.Errorf("Could not set up network transport: %v", err) + return nil, fmt.Errorf("could not set up network transport: %v", err) } if conf.BindPort == 0 { port := nt.GetAutoBindPort() @@ -206,12 +214,13 @@ func newMemberlist(conf *Config) (*Memberlist, error) { handoffCh: make(chan struct{}, 1), highPriorityMsgQueue: list.New(), lowPriorityMsgQueue: list.New(), - nodeMap: make(map[string]*nodeState), + nodeMap: make(map[string]*NodeState), nodeTimers: make(map[string]*suspicion), - awareness: newAwareness(conf.AwarenessMaxMultiplier), + awareness: newAwareness(conf.AwarenessMaxMultiplier, conf.MetricLabels), ackHandlers: make(map[uint32]*ackHandler), broadcasts: &TransmitLimitedQueue{RetransmitMult: conf.RetransmitMult}, logger: logger, + metricLabels: conf.MetricLabels, } m.broadcasts.NumNodes = func() int { return m.estNumNodes() @@ -227,6 +236,7 @@ func newMemberlist(conf *Config) (*Memberlist, error) { go m.streamListen() go m.packetListen() go m.packetHandler() + go m.checkBroadcastQueueDepth() return m, nil } @@ -241,7 +251,7 @@ func Create(conf *Config) (*Memberlist, error) { return nil, err } if err := m.setAlive(); err != nil { - m.Shutdown() + _ = m.Shutdown() return nil, err } m.schedule() @@ -263,7 +273,7 @@ func (m *Memberlist) Join(existing []string) (int, error) { for _, exist := range existing { addrs, err := m.resolveAddr(exist) if err != nil { - err = fmt.Errorf("Failed to resolve %s: %v", exist, err) + err = fmt.Errorf("failed to resolve %s: %v", exist, err) errs = multierror.Append(errs, err) m.logger.Printf("[WARN] memberlist: %v", err) continue @@ -273,7 +283,7 @@ func (m *Memberlist) Join(existing []string) (int, error) { hp := joinHostPort(addr.ip.String(), addr.port) a := Address{Addr: hp, Name: addr.nodeName} if err := m.pushPullNode(a, true); err != nil { - err = fmt.Errorf("Failed to join %s: %v", a.Addr, err) + err = fmt.Errorf("failed to join %s: %v", a.Addr, err) errs = multierror.Append(errs, err) m.logger.Printf("[DEBUG] memberlist: %v", err) continue @@ -429,14 +439,15 @@ func (m *Memberlist) setAlive() error { // Check if this is a public address without encryption 
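+	// Addresses outside the RFC 6890 special-purpose registries are
+	// publicly routable, which is what triggers the warning below when
+	// encryption is disabled.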
ipAddr, err := sockaddr.NewIPAddr(addr.String()) if err != nil { - return fmt.Errorf("Failed to parse interface addresses: %v", err) + return fmt.Errorf("failed to parse interface addresses: %v", err) } ifAddrs := []sockaddr.IfAddr{ sockaddr.IfAddr{ SockAddr: ipAddr, }, } - _, publicIfs, err := sockaddr.IfByRFC("6890", ifAddrs) + _, publicIfs, _ := sockaddr.IfByRFC("6890", ifAddrs) + if len(publicIfs) > 0 && !m.config.EncryptionEnabled() { m.logger.Printf("[WARN] memberlist: Binding to public address without encryption!") } @@ -480,7 +491,7 @@ func (m *Memberlist) refreshAdvertise() (net.IP, int, error) { addr, port, err := m.transport.FinalAdvertiseAddr( m.config.AdvertiseAddr, m.config.AdvertisePort) if err != nil { - return nil, 0, fmt.Errorf("Failed to get final advertise address: %v", err) + return nil, 0, fmt.Errorf("failed to get final advertise address: %v", err) } m.setAdvertise(addr, port) return addr, port, nil @@ -763,10 +774,24 @@ func (m *Memberlist) getNodeStateChange(addr string) time.Time { return n.StateChange } -func (m *Memberlist) changeNode(addr string, f func(*nodeState)) { +func (m *Memberlist) changeNode(addr string, f func(*NodeState)) { m.nodeLock.Lock() defer m.nodeLock.Unlock() n := m.nodeMap[addr] f(n) } + +// checkBroadcastQueueDepth periodically checks the size of the broadcast queue +// to see if it is too large +func (m *Memberlist) checkBroadcastQueueDepth() { + for { + select { + case <-time.After(m.config.QueueCheckInterval): + numq := m.broadcasts.NumQueued() + metrics.AddSampleWithLabels([]string{"memberlist", "queue", "broadcasts"}, float32(numq), m.metricLabels) + case <-m.shutdownCh: + return + } + } +} diff --git a/vendor/github.com/hashicorp/memberlist/merge_delegate.go b/vendor/github.com/hashicorp/memberlist/merge_delegate.go index 89afb59f201ad..0cb16427cd4e9 100644 --- a/vendor/github.com/hashicorp/memberlist/merge_delegate.go +++ b/vendor/github.com/hashicorp/memberlist/merge_delegate.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package memberlist // MergeDelegate is used to involve a client in diff --git a/vendor/github.com/hashicorp/memberlist/mock_transport.go b/vendor/github.com/hashicorp/memberlist/mock_transport.go index 0a7d30a277d40..58f2fa9d57df5 100644 --- a/vendor/github.com/hashicorp/memberlist/mock_transport.go +++ b/vendor/github.com/hashicorp/memberlist/mock_transport.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package memberlist import ( @@ -78,7 +81,7 @@ func (t *MockTransport) FinalAdvertiseAddr(string, int) (net.IP, int, error) { ip := net.ParseIP(host) if ip == nil { - return nil, 0, fmt.Errorf("Failed to parse IP %q", host) + return nil, 0, fmt.Errorf("failed to parse IP %q", host) } port, err := strconv.ParseInt(portStr, 10, 16) @@ -119,7 +122,9 @@ func (t *MockTransport) PacketCh() <-chan *Packet { // See NodeAwareTransport. func (t *MockTransport) IngestPacket(conn net.Conn, addr net.Addr, now time.Time, shouldClose bool) error { if shouldClose { - defer conn.Close() + defer func() { + _ = conn.Close() + }() } // Copy everything from the stream into packet buffer. 
@@ -189,7 +194,7 @@ func (t *MockTransport) getPeer(a Address) (*MockTransport, error) { dest, ok = t.net.transportsByAddr[a.Addr] } if !ok { - return nil, fmt.Errorf("No route to %s", a) + return nil, fmt.Errorf("no route to %s", a) } return dest, nil } diff --git a/vendor/github.com/hashicorp/memberlist/net.go b/vendor/github.com/hashicorp/memberlist/net.go index 609e01dd9d031..467a0e7139018 100644 --- a/vendor/github.com/hashicorp/memberlist/net.go +++ b/vendor/github.com/hashicorp/memberlist/net.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package memberlist import ( @@ -12,8 +15,8 @@ import ( "sync/atomic" "time" - metrics "github.com/armon/go-metrics" - "github.com/hashicorp/go-msgpack/codec" + metrics "github.com/hashicorp/go-metrics/compat" + "github.com/hashicorp/go-msgpack/v2/codec" ) // This is the minimum and maximum protocol version that we can @@ -231,23 +234,33 @@ func (m *Memberlist) streamListen() { // handleConn handles a single incoming stream connection from the transport. func (m *Memberlist) handleConn(conn net.Conn) { - defer conn.Close() m.logger.Printf("[DEBUG] memberlist: Stream connection %s", LogConn(conn)) - metrics.IncrCounter([]string{"memberlist", "tcp", "accept"}, 1) + metrics.IncrCounterWithLabels([]string{"memberlist", "tcp", "accept"}, 1, m.metricLabels) - conn.SetDeadline(time.Now().Add(m.config.TCPTimeout)) + if err := conn.SetDeadline(time.Now().Add(m.config.TCPTimeout)); err != nil { + m.logger.Printf("Err: Could not set the deadline: %s", err) + } var ( streamLabel string err error + // Store the original conn, because the code below shadows it. + // If reading the label header from the stream fail, we should still close the connection. + origConn = conn ) conn, streamLabel, err = RemoveLabelHeaderFromStream(conn) if err != nil { - m.logger.Printf("[ERR] memberlist: failed to receive and remove the stream label header: %s %s", err, LogConn(conn)) + m.logger.Printf("[ERR] memberlist: failed to receive and remove the stream label header: %s %s", err, LogConn(origConn)) + _ = origConn.Close() return } + defer func() { + // Always close the wrapped connection, that we got after removing the label header. + _ = conn.Close() + }() + if m.config.SkipInboundLabelCheck { // Set this from config so that the auth data assertions work below. streamLabel = m.config.Label @@ -264,7 +277,7 @@ func (m *Memberlist) handleConn(conn net.Conn) { m.logger.Printf("[ERR] memberlist: failed to receive: %s %s", err, LogConn(conn)) resp := errResp{err.Error()} - out, err := encode(errMsg, &resp) + out, err := encode(errMsg, &resp, m.config.MsgpackUseNewTimeFormat) if err != nil { m.logger.Printf("[ERR] memberlist: Failed to encode error response: %s", err) return @@ -323,7 +336,7 @@ func (m *Memberlist) handleConn(conn net.Conn) { } ack := ackResp{p.SeqNo, nil} - out, err := encode(ackRespMsg, &ack) + out, err := encode(ackRespMsg, &ack, m.config.MsgpackUseNewTimeFormat) if err != nil { m.logger.Printf("[ERR] memberlist: Failed to encode ack: %s", err) return @@ -696,7 +709,7 @@ func (m *Memberlist) ensureCanConnect(from net.Addr) error { ip := net.ParseIP(host) if ip == nil { - return fmt.Errorf("Cannot parse IP from %s", host) + return fmt.Errorf("cannot parse IP from %s", host) } return m.config.IPAllowed(ip) } @@ -762,7 +775,7 @@ func (m *Memberlist) handleCompressed(buf []byte, from net.Addr, timestamp time. 
// encodeAndSendMsg is used to combine the encoding and sending steps func (m *Memberlist) encodeAndSendMsg(a Address, msgType messageType, msg interface{}) error { - out, err := encode(msgType, msg) + out, err := encode(msgType, msg, m.config.MsgpackUseNewTimeFormat) if err != nil { return err } @@ -814,7 +827,7 @@ func (m *Memberlist) rawSendMsgPacket(a Address, node *Node, msg []byte) error { // Check if we have compression enabled if m.config.EnableCompression { - buf, err := compressPayload(msg) + buf, err := compressPayload(msg, m.config.MsgpackUseNewTimeFormat) if err != nil { m.logger.Printf("[WARN] memberlist: Failed to compress payload: %v", err) } else { @@ -867,7 +880,7 @@ func (m *Memberlist) rawSendMsgPacket(a Address, node *Node, msg []byte) error { msg = buf.Bytes() } - metrics.IncrCounter([]string{"memberlist", "udp", "sent"}, float32(len(msg))) + metrics.IncrCounterWithLabels([]string{"memberlist", "udp", "sent"}, float32(len(msg)), m.metricLabels) _, err := m.transport.WriteToAddress(msg, a) return err } @@ -877,7 +890,7 @@ func (m *Memberlist) rawSendMsgPacket(a Address, node *Node, msg []byte) error { func (m *Memberlist) rawSendMsgStream(conn net.Conn, sendBuf []byte, streamLabel string) error { // Check if compression is enabled if m.config.EnableCompression { - compBuf, err := compressPayload(sendBuf) + compBuf, err := compressPayload(sendBuf, m.config.MsgpackUseNewTimeFormat) if err != nil { m.logger.Printf("[ERROR] memberlist: Failed to compress payload: %v", err) } else { @@ -896,7 +909,7 @@ func (m *Memberlist) rawSendMsgStream(conn net.Conn, sendBuf []byte, streamLabel } // Write out the entire send buffer - metrics.IncrCounter([]string{"memberlist", "tcp", "sent"}, float32(len(sendBuf))) + metrics.IncrCounterWithLabels([]string{"memberlist", "tcp", "sent"}, float32(len(sendBuf)), m.metricLabels) if n, err := conn.Write(sendBuf); err != nil { return err @@ -917,7 +930,9 @@ func (m *Memberlist) sendUserMsg(a Address, sendBuf []byte) error { if err != nil { return err } - defer conn.Close() + defer func() { + _ = conn.Close() + }() bufConn := bytes.NewBuffer(nil) if err := bufConn.WriteByte(byte(userMsg)); err != nil { @@ -926,6 +941,8 @@ func (m *Memberlist) sendUserMsg(a Address, sendBuf []byte) error { header := userMsgHeader{UserMsgLen: len(sendBuf)} hd := codec.MsgpackHandle{} + hd.TimeNotBuiltin = !m.config.MsgpackUseNewTimeFormat + enc := codec.NewEncoder(bufConn, &hd) if err := enc.Encode(&header); err != nil { return err @@ -949,16 +966,20 @@ func (m *Memberlist) sendAndReceiveState(a Address, join bool) ([]pushNodeState, if err != nil { return nil, nil, err } - defer conn.Close() + defer func() { + _ = conn.Close() + }() m.logger.Printf("[DEBUG] memberlist: Initiating push/pull sync with: %s %s", a.Name, conn.RemoteAddr()) - metrics.IncrCounter([]string{"memberlist", "tcp", "connect"}, 1) + metrics.IncrCounterWithLabels([]string{"memberlist", "tcp", "connect"}, 1, m.metricLabels) // Send our state if err := m.sendLocalState(conn, join, m.config.Label); err != nil { return nil, nil, err } - conn.SetDeadline(time.Now().Add(m.config.TCPTimeout)) + if err := conn.SetDeadline(time.Now().Add(m.config.TCPTimeout)); err != nil { + m.logger.Printf("Err: Could not set the deadline: %s", err) + } msgType, bufConn, dec, err := m.readStream(conn, m.config.Label) if err != nil { return nil, nil, err @@ -986,7 +1007,9 @@ func (m *Memberlist) sendAndReceiveState(a Address, join bool) ([]pushNodeState, // sendLocalState is invoked to send our local state over a stream 
connection. func (m *Memberlist) sendLocalState(conn net.Conn, join bool, streamLabel string) error { // Setup a deadline - conn.SetDeadline(time.Now().Add(m.config.TCPTimeout)) + if err := conn.SetDeadline(time.Now().Add(m.config.TCPTimeout)); err != nil { + m.logger.Printf("Err: Could not set the deadline: %s", err) + } // Prepare the local node state m.nodeLock.RLock() @@ -1005,6 +1028,22 @@ func (m *Memberlist) sendLocalState(conn net.Conn, join bool, streamLabel string } m.nodeLock.RUnlock() + nodeStateCounts := make(map[string]int) + nodeStateCounts[StateAlive.metricsString()] = 0 + nodeStateCounts[StateLeft.metricsString()] = 0 + nodeStateCounts[StateDead.metricsString()] = 0 + nodeStateCounts[StateSuspect.metricsString()] = 0 + + for _, n := range localNodes { + nodeStateCounts[n.State.metricsString()]++ + } + + for nodeState, cnt := range nodeStateCounts { + metrics.SetGaugeWithLabels([]string{"memberlist", "node", "instances"}, + float32(cnt), + append(m.metricLabels, metrics.Label{Name: "node_state", Value: nodeState})) + } + // Get the delegate state var userData []byte if m.config.Delegate != nil { @@ -1040,6 +1079,9 @@ func (m *Memberlist) sendLocalState(conn net.Conn, join bool, streamLabel string } } + moreBytes := binary.BigEndian.Uint32(bufConn.Bytes()[1:5]) + metrics.SetGaugeWithLabels([]string{"memberlist", "size", "local"}, float32(moreBytes), m.metricLabels) + // Get the send buffer return m.rawSendMsgStream(conn, bufConn.Bytes(), streamLabel) } @@ -1086,8 +1128,10 @@ func (m *Memberlist) decryptRemoteState(bufConn io.Reader, streamLabel string) ( // Ensure we aren't asked to download too much. This is to guard against // an attack vector where a huge amount of state is sent moreBytes := binary.BigEndian.Uint32(cipherText.Bytes()[1:5]) + metrics.AddSampleWithLabels([]string{"memberlist", "size", "remote"}, float32(moreBytes), m.metricLabels) + if moreBytes > maxPushStateBytes { - return nil, fmt.Errorf("Remote node state is larger than limit (%d)", moreBytes) + return nil, fmt.Errorf("remote node state is larger than limit (%d)", moreBytes) } @@ -1136,7 +1180,7 @@ func (m *Memberlist) readStream(conn net.Conn, streamLabel string) (messageType, if msgType == encryptMsg { if !m.config.EncryptionEnabled() { return 0, nil, nil, - fmt.Errorf("Remote state is encrypted and encryption is not configured") + fmt.Errorf("remote state is encrypted and encryption is not configured") } plain, err := m.decryptRemoteState(bufConn, streamLabel) @@ -1149,7 +1193,7 @@ func (m *Memberlist) readStream(conn net.Conn, streamLabel string) (messageType, bufConn = bytes.NewReader(plain[1:]) } else if m.config.EncryptionEnabled() && m.config.GossipVerifyIncoming { return 0, nil, nil, - fmt.Errorf("Encryption is configured but remote state is not encrypted") + fmt.Errorf("encryption is configured but remote state is not encrypted") } // Get the msgPack decoders @@ -1205,7 +1249,7 @@ func (m *Memberlist) readRemoteState(bufConn io.Reader, dec *codec.Decoder) (boo bytes, err := io.ReadAtLeast(bufConn, userBuf, header.UserStateLen) if err == nil && bytes != header.UserStateLen { err = fmt.Errorf( - "Failed to read full user state (%d / %d)", + "failed to read full user state (%d / %d)", bytes, header.UserStateLen) } if err != nil { @@ -1278,7 +1322,7 @@ func (m *Memberlist) readUserMsg(bufConn io.Reader, dec *codec.Decoder) error { bytes, err := io.ReadAtLeast(bufConn, userBuf, header.UserMsgLen) if err == nil && bytes != header.UserMsgLen { err = fmt.Errorf( - "Failed to read full user message 
(%d / %d)", + "failed to read full user message (%d / %d)", bytes, header.UserMsgLen) } if err != nil { @@ -1303,7 +1347,7 @@ func (m *Memberlist) sendPingAndWaitForAck(a Address, ping ping, deadline time.T return false, errNodeNamesAreRequired } - conn, err := m.transport.DialAddressTimeout(a, deadline.Sub(time.Now())) + conn, err := m.transport.DialAddressTimeout(a, time.Until(deadline)) if err != nil { // If the node is actually dead we expect this to fail, so we // shouldn't spam the logs with it. After this point, errors @@ -1311,10 +1355,12 @@ func (m *Memberlist) sendPingAndWaitForAck(a Address, ping ping, deadline time.T // get propagated up. return false, nil } - defer conn.Close() - conn.SetDeadline(deadline) + defer func() { + _ = conn.Close() + }() + _ = conn.SetDeadline(deadline) - out, err := encode(pingMsg, &ping) + out, err := encode(pingMsg, &ping, m.config.MsgpackUseNewTimeFormat) if err != nil { return false, err } @@ -1329,7 +1375,7 @@ func (m *Memberlist) sendPingAndWaitForAck(a Address, ping ping, deadline time.T } if msgType != ackRespMsg { - return false, fmt.Errorf("Unexpected msgType (%d) from ping %s", msgType, LogConn(conn)) + return false, fmt.Errorf("unexpected msgType (%d) from ping %s", msgType, LogConn(conn)) } var ack ackResp @@ -1338,7 +1384,7 @@ func (m *Memberlist) sendPingAndWaitForAck(a Address, ping ping, deadline time.T } if ack.SeqNo != ping.SeqNo { - return false, fmt.Errorf("Sequence number from ack (%d) doesn't match ping (%d)", ack.SeqNo, ping.SeqNo) + return false, fmt.Errorf("sequence number from ack (%d) doesn't match ping (%d)", ack.SeqNo, ping.SeqNo) } return true, nil diff --git a/vendor/github.com/hashicorp/memberlist/net_transport.go b/vendor/github.com/hashicorp/memberlist/net_transport.go index 05830117297c2..cd66b752658d8 100644 --- a/vendor/github.com/hashicorp/memberlist/net_transport.go +++ b/vendor/github.com/hashicorp/memberlist/net_transport.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package memberlist import ( @@ -10,7 +13,7 @@ import ( "sync/atomic" "time" - "github.com/armon/go-metrics" + metrics "github.com/hashicorp/go-metrics/compat" sockaddr "github.com/hashicorp/go-sockaddr" ) @@ -35,6 +38,10 @@ type NetTransportConfig struct { // Logger is a logger for operator messages. Logger *log.Logger + + // MetricLabels is a map of optional labels to apply to all metrics + // emitted by this transport. + MetricLabels []metrics.Label } // NetTransport is a Transport implementation that uses connectionless UDP for @@ -48,6 +55,8 @@ type NetTransport struct { tcpListeners []*net.TCPListener udpListeners []*net.UDPConn shutdown int32 + + metricLabels []metrics.Label } var _ NodeAwareTransport = (*NetTransport)(nil) @@ -58,22 +67,23 @@ func NewNetTransport(config *NetTransportConfig) (*NetTransport, error) { // If we reject the empty list outright we can assume that there's at // least one listener of each type later during operation. if len(config.BindAddrs) == 0 { - return nil, fmt.Errorf("At least one bind address is required") + return nil, fmt.Errorf("at least one bind address is required") } // Build out the new transport. var ok bool t := NetTransport{ - config: config, - packetCh: make(chan *Packet), - streamCh: make(chan net.Conn), - logger: config.Logger, + config: config, + packetCh: make(chan *Packet), + streamCh: make(chan net.Conn), + logger: config.Logger, + metricLabels: config.MetricLabels, } // Clean up listeners if there's an error. 
defer func() { if !ok { - t.Shutdown() + _ = t.Shutdown() } }() @@ -85,7 +95,7 @@ func NewNetTransport(config *NetTransportConfig) (*NetTransport, error) { tcpAddr := &net.TCPAddr{IP: ip, Port: port} tcpLn, err := net.ListenTCP("tcp", tcpAddr) if err != nil { - return nil, fmt.Errorf("Failed to start TCP listener on %q port %d: %v", addr, port, err) + return nil, fmt.Errorf("failed to start TCP listener on %q port %d: %v", addr, port, err) } t.tcpListeners = append(t.tcpListeners, tcpLn) @@ -99,10 +109,10 @@ func NewNetTransport(config *NetTransportConfig) (*NetTransport, error) { udpAddr := &net.UDPAddr{IP: ip, Port: port} udpLn, err := net.ListenUDP("udp", udpAddr) if err != nil { - return nil, fmt.Errorf("Failed to start UDP listener on %q port %d: %v", addr, port, err) + return nil, fmt.Errorf("failed to start UDP listener on %q port %d: %v", addr, port, err) } if err := setUDPRecvBuf(udpLn); err != nil { - return nil, fmt.Errorf("Failed to resize UDP buffer: %v", err) + return nil, fmt.Errorf("failed to resize UDP buffer: %v", err) } t.udpListeners = append(t.udpListeners, udpLn) } @@ -134,7 +144,7 @@ func (t *NetTransport) FinalAdvertiseAddr(ip string, port int) (net.IP, int, err // If they've supplied an address, use that. advertiseAddr = net.ParseIP(ip) if advertiseAddr == nil { - return nil, 0, fmt.Errorf("Failed to parse advertise address %q", ip) + return nil, 0, fmt.Errorf("failed to parse advertise address %q", ip) } // Ensure IPv4 conversion if necessary. @@ -149,15 +159,15 @@ func (t *NetTransport) FinalAdvertiseAddr(ip string, port int) (net.IP, int, err var err error ip, err = sockaddr.GetPrivateIP() if err != nil { - return nil, 0, fmt.Errorf("Failed to get interface addresses: %v", err) + return nil, 0, fmt.Errorf("failed to get interface addresses: %v", err) } if ip == "" { - return nil, 0, fmt.Errorf("No private IP address found, and explicit IP not provided") + return nil, 0, fmt.Errorf("no private IP address found, and explicit IP not provided") } advertiseAddr = net.ParseIP(ip) if advertiseAddr == nil { - return nil, 0, fmt.Errorf("Failed to parse advertise address: %q", ip) + return nil, 0, fmt.Errorf("failed to parse advertise address: %q", ip) } } else { // Use the IP that we're bound to, based on the first @@ -203,7 +213,9 @@ func (t *NetTransport) PacketCh() <-chan *Packet { // See IngestionAwareTransport. func (t *NetTransport) IngestPacket(conn net.Conn, addr net.Addr, now time.Time, shouldClose bool) error { if shouldClose { - defer conn.Close() + defer func() { + _ = conn.Close() + }() } // Copy everything from the stream into packet buffer. @@ -260,10 +272,10 @@ func (t *NetTransport) Shutdown() error { // Rip through all the connections and shut them down. for _, conn := range t.tcpListeners { - conn.Close() + _ = conn.Close() } for _, conn := range t.udpListeners { - conn.Close() + _ = conn.Close() } // Block until all the listener threads have died. @@ -341,7 +353,7 @@ func (t *NetTransport) udpListen(udpLn *net.UDPConn) { } // Ingest the packet. 
- metrics.IncrCounter([]string{"memberlist", "udp", "received"}, float32(n)) + metrics.IncrCounterWithLabels([]string{"memberlist", "udp", "received"}, float32(n), t.metricLabels) t.packetCh <- &Packet{ Buf: buf[:n], From: addr, diff --git a/vendor/github.com/hashicorp/memberlist/node_selection_delegate.go b/vendor/github.com/hashicorp/memberlist/node_selection_delegate.go new file mode 100644 index 0000000000000..fe77d62e037c9 --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/node_selection_delegate.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: MPL-2.0 + +package memberlist + +// NodeSelectionDelegate is an optional delegate that can be used to filter and prioritize +// nodes for gossip, and push/pull operations. This allows implementing custom routing logic, +// such as zone-aware or rack-aware gossiping. +// +// This delegate is not used for probes (health checks). When implementing zone-aware gossiping, +// probes can bypass this delegate. +type NodeSelectionDelegate interface { + // SelectNodes filters and prioritizes nodes for selection. It receives all candidate nodes + // and returns: + // - selected: the nodes that should be included in the selection pool + // - preferred: an optional single node that should be prioritized. During a gossip cycle, + // the preferred node is always included if present. If nil, all gossip targets + // are chosen randomly from the selected nodes. + // It is not necessary to include the preferred node in the selected ones nor to explicitly remove it from them. + // The input NodeState slice cannot be manipulated in-place, but if all input nodes are selected + // then it's safe to return the input slice as is. + // + // It's not required for the preferred node to be included in the selected slice. The preferred + // node would be picked anyway. + SelectNodes([]*NodeState) (selected []*NodeState, preferred *NodeState) +} diff --git a/vendor/github.com/hashicorp/memberlist/ping_delegate.go b/vendor/github.com/hashicorp/memberlist/ping_delegate.go index 1566c8b3d5014..0396835527689 100644 --- a/vendor/github.com/hashicorp/memberlist/ping_delegate.go +++ b/vendor/github.com/hashicorp/memberlist/ping_delegate.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package memberlist import "time" diff --git a/vendor/github.com/hashicorp/memberlist/queue.go b/vendor/github.com/hashicorp/memberlist/queue.go index 2eb33c5444d87..e1c9c6ff57d6b 100644 --- a/vendor/github.com/hashicorp/memberlist/queue.go +++ b/vendor/github.com/hashicorp/memberlist/queue.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package memberlist import ( @@ -131,13 +134,13 @@ type Broadcast interface { // You shoud ensure that Invalidates() checks the same uniqueness as the // example below: // -// func (b *foo) Invalidates(other Broadcast) bool { -// nb, ok := other.(NamedBroadcast) -// if !ok { -// return false -// } -// return b.Name() == nb.Name() -// } +// func (b *foo) Invalidates(other Broadcast) bool { +// nb, ok := other.(NamedBroadcast) +// if !ok { +// return false +// } +// return b.Name() == nb.Name() +// } // // Invalidates() isn't currently used for NamedBroadcasts, but that may change // in the future. 
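The `NodeSelectionDelegate` interface added above is the hook that the `kRandomNodes` changes further down thread through `gossip()` and `pushPull()`; probes intentionally bypass it, as a comment in `probeNode` below notes, and the `state.go` hunks read it from `m.config.NodeSelection`, so it is presumably wired up through a field on the memberlist `Config`. A minimal zone-aware sketch follows; the convention that a node publishes its zone as the raw bytes of `Node.Meta` (for example via a delegate's `NodeMeta` hook) is an assumption made purely for illustration, not something this patch defines.

```go
package gossipzone

import "github.com/hashicorp/memberlist"

// zoneAwareSelector is a hypothetical NodeSelectionDelegate. The
// zone-in-Meta convention below is an assumption for this sketch only.
type zoneAwareSelector struct {
	localZone string
}

func (s *zoneAwareSelector) SelectNodes(nodes []*memberlist.NodeState) (selected []*memberlist.NodeState, preferred *memberlist.NodeState) {
	var sameZone []*memberlist.NodeState
	for _, n := range nodes {
		if string(n.Meta) == s.localZone {
			sameZone = append(sameZone, n)
		} else if preferred == nil {
			// Keep one cross-zone node as the preferred target so gossip
			// still crosses zone boundaries every cycle. A real
			// implementation would likely pick this node at random.
			preferred = n
		}
	}
	if len(sameZone) == 0 {
		// No candidates in our zone. The interface contract says it is
		// safe to return the input slice as-is, so fall back to it.
		return nodes, preferred
	}
	return sameZone, preferred
}
```

Per the contract above, the preferred node does not need to appear in `selected`: the reworked `kRandomNodes` (see the `util.go` hunks below) places it in the result first, then fills the remaining slots randomly from the selected set.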
diff --git a/vendor/github.com/hashicorp/memberlist/security.go b/vendor/github.com/hashicorp/memberlist/security.go index 6831be3bc6258..70090d6a35a23 100644 --- a/vendor/github.com/hashicorp/memberlist/security.go +++ b/vendor/github.com/hashicorp/memberlist/security.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package memberlist import ( @@ -10,14 +13,12 @@ import ( ) /* - Encrypted messages are prefixed with an encryptionVersion byte that is used for us to be able to properly encode/decode. We currently support the following versions: - 0 - AES-GCM 128, using PKCS7 padding - 1 - AES-GCM 128, no padding. Padding not needed, caused bloat. - + 0 - AES-GCM 128, using PKCS7 padding + 1 - AES-GCM 128, no padding. Padding not needed, caused bloat. */ type encryptionVersion uint8 @@ -114,7 +115,7 @@ func encryptPayload(vsn encryptionVersion, key []byte, msg []byte, data []byte, // Ensure we are correctly padded (only version 0) if vsn == 0 { - io.Copy(dst, bytes.NewReader(msg)) + _, _ = io.Copy(dst, bytes.NewReader(msg)) pkcs7encode(dst, offset+versionSize+nonceSize, aes.BlockSize) } @@ -171,18 +172,18 @@ func decryptMessage(key, msg []byte, data []byte) ([]byte, error) { func decryptPayload(keys [][]byte, msg []byte, data []byte) ([]byte, error) { // Ensure we have at least one byte if len(msg) == 0 { - return nil, fmt.Errorf("Cannot decrypt empty payload") + return nil, fmt.Errorf("cannot decrypt empty payload") } // Verify the version vsn := encryptionVersion(msg[0]) if vsn > maxEncryptionVersion { - return nil, fmt.Errorf("Unsupported encryption version %d", msg[0]) + return nil, fmt.Errorf("unsupported encryption version %d", msg[0]) } // Ensure the length is sane if len(msg) < encryptedLength(vsn, 0) { - return nil, fmt.Errorf("Payload is too small to decrypt: %d", len(msg)) + return nil, fmt.Errorf("payload is too small to decrypt: %d", len(msg)) } for _, key := range keys { @@ -197,7 +198,7 @@ func decryptPayload(keys [][]byte, msg []byte, data []byte) ([]byte, error) { } } - return nil, fmt.Errorf("No installed keys could decrypt the message") + return nil, fmt.Errorf("no installed keys could decrypt the message") } func appendBytes(first []byte, second []byte) []byte { diff --git a/vendor/github.com/hashicorp/memberlist/state.go b/vendor/github.com/hashicorp/memberlist/state.go index 7a2339e9b02c1..919f716656f6d 100644 --- a/vendor/github.com/hashicorp/memberlist/state.go +++ b/vendor/github.com/hashicorp/memberlist/state.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package memberlist import ( @@ -10,11 +13,26 @@ import ( "sync/atomic" "time" - metrics "github.com/armon/go-metrics" + metrics "github.com/hashicorp/go-metrics/compat" ) type NodeStateType int +func (t NodeStateType) metricsString() string { + switch t { + case StateAlive: + return "alive" + case StateDead: + return "dead" + case StateSuspect: + return "suspect" + case StateLeft: + return "left" + default: + return fmt.Sprintf("unhandled-value-%d", t) + } +} + const ( StateAlive NodeStateType = iota StateSuspect @@ -58,7 +76,7 @@ func (n *Node) String() string { } // NodeState is used to manage our state view of another node -type nodeState struct { +type NodeState struct { Node Incarnation uint32 // Last known incarnation number State NodeStateType // Current state @@ -67,17 +85,17 @@ type nodeState struct { // Address returns the host:port form of a node's address, suitable for use // with a transport. 
-func (n *nodeState) Address() string { +func (n *NodeState) Address() string { return n.Node.Address() } // FullAddress returns the node name and host:port form of a node's address, // suitable for use with a transport. -func (n *nodeState) FullAddress() Address { +func (n *NodeState) FullAddress() Address { return n.Node.FullAddress() } -func (n *nodeState) DeadOrLeft() bool { +func (n *NodeState) DeadOrLeft() bool { return n.State == StateDead || n.State == StateLeft } @@ -234,9 +252,8 @@ START: // Determine if we should probe this node skip := false - var node nodeState + node := *m.nodes[m.probeIndex] - node = *m.nodes[m.probeIndex] if node.Name == m.config.Name { skip = true } else if node.DeadOrLeft() { @@ -285,15 +302,15 @@ func failedRemote(err error) bool { } // probeNode handles a single round of failure checking on a node. -func (m *Memberlist) probeNode(node *nodeState) { - defer metrics.MeasureSince([]string{"memberlist", "probeNode"}, time.Now()) +func (m *Memberlist) probeNode(node *NodeState) { + defer metrics.MeasureSinceWithLabels([]string{"memberlist", "probeNode"}, time.Now(), m.metricLabels) // We use our health awareness to scale the overall probe interval, so we // slow down if we detect problems. The ticker that calls us can handle // us running over the base interval, and will skip missed ticks. probeInterval := m.awareness.ScaleTimeout(m.config.ProbeInterval) if probeInterval > m.config.ProbeInterval { - metrics.IncrCounter([]string{"memberlist", "degraded", "probe"}, 1) + metrics.IncrCounterWithLabels([]string{"memberlist", "degraded", "probe"}, 1, m.metricLabels) } // Prepare a ping message and setup an ack handler. @@ -338,14 +355,14 @@ func (m *Memberlist) probeNode(node *nodeState) { } } else { var msgs [][]byte - if buf, err := encode(pingMsg, &ping); err != nil { + if buf, err := encode(pingMsg, &ping, m.config.MsgpackUseNewTimeFormat); err != nil { m.logger.Printf("[ERR] memberlist: Failed to encode UDP ping message: %s", err) return } else { msgs = append(msgs, buf.Bytes()) } s := suspect{Incarnation: node.Incarnation, Node: node.Name, From: m.config.Name} - if buf, err := encode(suspectMsg, &s); err != nil { + if buf, err := encode(suspectMsg, &s, m.config.MsgpackUseNewTimeFormat); err != nil { m.logger.Printf("[ERR] memberlist: Failed to encode suspect message: %s", err) return } else { @@ -373,7 +390,7 @@ func (m *Memberlist) probeNode(node *nodeState) { // Wait for response or round-trip-time. select { case v := <-ackCh: - if v.Complete == true { + if v.Complete { if m.config.Ping != nil { rtt := v.Timestamp.Sub(sent) m.config.Ping.NotifyPingComplete(&node.Node, rtt, v.Payload) @@ -383,7 +400,7 @@ func (m *Memberlist) probeNode(node *nodeState) { // As an edge case, if we get a timeout, we need to re-enqueue it // here to break out of the select below. - if v.Complete == false { + if !v.Complete { ackCh <- v } case <-time.After(m.config.ProbeTimeout): @@ -398,8 +415,10 @@ func (m *Memberlist) probeNode(node *nodeState) { HANDLE_REMOTE_FAILURE: // Get some random live nodes. + // We intentionally don't use the node selector here for now, because we don't want to limit + // indirect probes. We may reconsider this in the future. 
m.nodeLock.RLock() - kNodes := kRandomNodes(m.config.IndirectChecks, m.nodes, func(n *nodeState) bool { + kNodes := kRandomNodes(m.config.IndirectChecks, m.nodes, nil, func(n *NodeState) bool { return n.Name == m.config.Name || n.Name == node.Name || n.State != StateAlive @@ -466,11 +485,9 @@ HANDLE_REMOTE_FAILURE: // channel here because we want to issue a warning below if that's the // *only* way we hear back from the peer, so we have to let this time // out first to allow the normal UDP-based acks to come in. - select { - case v := <-ackCh: - if v.Complete == true { - return - } + v := <-ackCh + if v.Complete { + return } // Finally, poll the fallback channel. The timeouts are set such that @@ -534,7 +551,7 @@ func (m *Memberlist) Ping(node string, addr net.Addr) (time.Duration, error) { // Wait for response or timeout. select { case v := <-ackCh: - if v.Complete == true { + if v.Complete { return v.Timestamp.Sub(sent), nil } case <-time.After(m.config.ProbeTimeout): @@ -573,11 +590,11 @@ func (m *Memberlist) resetNodes() { // gossip is invoked every GossipInterval period to broadcast our gossip // messages to a few random nodes. func (m *Memberlist) gossip() { - defer metrics.MeasureSince([]string{"memberlist", "gossip"}, time.Now()) + defer metrics.MeasureSinceWithLabels([]string{"memberlist", "gossip"}, time.Now(), m.metricLabels) // Get some random live, suspect, or recently dead nodes m.nodeLock.RLock() - kNodes := kRandomNodes(m.config.GossipNodes, m.nodes, func(n *nodeState) bool { + kNodes := kRandomNodes(m.config.GossipNodes, m.nodes, m.config.NodeSelection, func(n *NodeState) bool { if n.Name == m.config.Name { return true } @@ -631,9 +648,15 @@ func (m *Memberlist) gossip() { // reasonably expensive as the entire state of this node is exchanged // with the other node. func (m *Memberlist) pushPull() { - // Get a random live node + // Determine how many nodes to push/pull with + numNodes := m.config.PushPullNodes + if numNodes <= 0 { + numNodes = 1 + } + + // Get random live nodes m.nodeLock.RLock() - nodes := kRandomNodes(1, m.nodes, func(n *nodeState) bool { + nodes := kRandomNodes(numNodes, m.nodes, m.config.NodeSelection, func(n *NodeState) bool { return n.Name == m.config.Name || n.State != StateAlive }) @@ -643,17 +666,18 @@ func (m *Memberlist) pushPull() { if len(nodes) == 0 { return } - node := nodes[0] - // Attempt a push pull - if err := m.pushPullNode(node.FullAddress(), false); err != nil { - m.logger.Printf("[ERR] memberlist: Push/Pull with %s failed: %s", node.Name, err) + // Attempt push/pull with each selected node + for _, node := range nodes { + if err := m.pushPullNode(node.FullAddress(), false); err != nil { + m.logger.Printf("[ERR] memberlist: Push/Pull with %s failed: %s", node.Name, err) + } } } // pushPullNode does a complete state exchange with a specific node. func (m *Memberlist) pushPullNode(a Address, join bool) error { - defer metrics.MeasureSince([]string{"memberlist", "pushPullNode"}, time.Now()) + defer metrics.MeasureSinceWithLabels([]string{"memberlist", "pushPullNode"}, time.Now(), m.metricLabels) // Attempt to send and receive with the node remote, userState, err := m.sendAndReceiveState(a, join) @@ -897,7 +921,7 @@ func (m *Memberlist) invokeNackHandler(nack nackResp) { // accusedInc value, or you can supply 0 to just get the next incarnation number. // This alters the node state that's passed in so this MUST be called while the // nodeLock is held. 
-func (m *Memberlist) refute(me *nodeState, accusedInc uint32) { +func (m *Memberlist) refute(me *NodeState, accusedInc uint32) { // Make sure the incarnation number beats the accusation. inc := m.nextIncarnation() if accusedInc >= inc { @@ -986,7 +1010,7 @@ func (m *Memberlist) aliveNode(a *alive, notify chan struct{}, bootstrap bool) { m.logger.Printf("[WARN] memberlist: Rejected node %s (%v): %s", a.Node, net.IP(a.Addr), errCon) return } - state = &nodeState{ + state = &NodeState{ Node: Node{ Name: a.Node, Addr: a.Addr, @@ -1125,7 +1149,7 @@ func (m *Memberlist) aliveNode(a *alive, notify chan struct{}, bootstrap bool) { } // Update metrics - metrics.IncrCounter([]string{"memberlist", "msg", "alive"}, 1) + metrics.IncrCounterWithLabels([]string{"memberlist", "msg", "alive"}, 1, m.metricLabels) // Notify the delegate of any relevant updates if m.config.Events != nil { @@ -1183,7 +1207,7 @@ func (m *Memberlist) suspectNode(s *suspect) { } // Update metrics - metrics.IncrCounter([]string{"memberlist", "msg", "suspect"}, 1) + metrics.IncrCounterWithLabels([]string{"memberlist", "msg", "suspect"}, 1, m.metricLabels) // Update the state state.Incarnation = s.Incarnation @@ -1213,7 +1237,7 @@ func (m *Memberlist) suspectNode(s *suspect) { m.nodeLock.Lock() state, ok := m.nodeMap[s.Node] - timeout := ok && state.State == StateSuspect && state.StateChange == changeTime + timeout := ok && state.State == StateSuspect && state.StateChange.Equal(changeTime) if timeout { d = &dead{Incarnation: state.Incarnation, Node: state.Name, From: m.config.Name} } @@ -1221,7 +1245,7 @@ func (m *Memberlist) suspectNode(s *suspect) { if timeout { if k > 0 && numConfirmations < k { - metrics.IncrCounter([]string{"memberlist", "degraded", "timeout"}, 1) + metrics.IncrCounterWithLabels([]string{"memberlist", "degraded", "timeout"}, 1, m.metricLabels) } m.logger.Printf("[INFO] memberlist: Marking %s as failed, suspect timeout reached (%d peer confirmations)", @@ -1274,7 +1298,7 @@ func (m *Memberlist) deadNode(d *dead) { } // Update metrics - metrics.IncrCounter([]string{"memberlist", "msg", "dead"}, 1) + metrics.IncrCounterWithLabels([]string{"memberlist", "msg", "dead"}, 1, m.metricLabels) // Update the state state.Incarnation = d.Incarnation diff --git a/vendor/github.com/hashicorp/memberlist/suspicion.go b/vendor/github.com/hashicorp/memberlist/suspicion.go index f8aa9e20a8ebe..314f5f9c6e0d8 100644 --- a/vendor/github.com/hashicorp/memberlist/suspicion.go +++ b/vendor/github.com/hashicorp/memberlist/suspicion.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package memberlist import ( diff --git a/vendor/github.com/hashicorp/memberlist/tag.sh b/vendor/github.com/hashicorp/memberlist/tag.sh index cd16623a70dc6..637e70029ffdd 100644 --- a/vendor/github.com/hashicorp/memberlist/tag.sh +++ b/vendor/github.com/hashicorp/memberlist/tag.sh @@ -1,4 +1,7 @@ #!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + set -e # The version must be supplied from the environment. Do not include the diff --git a/vendor/github.com/hashicorp/memberlist/transport.go b/vendor/github.com/hashicorp/memberlist/transport.go index f3d05364d73df..8b7d55973e216 100644 --- a/vendor/github.com/hashicorp/memberlist/transport.go +++ b/vendor/github.com/hashicorp/memberlist/transport.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package memberlist import ( diff --git a/vendor/github.com/hashicorp/memberlist/util.go b/vendor/github.com/hashicorp/memberlist/util.go index 8f609c1e0f96a..74e081e2f8c5e 100644 --- a/vendor/github.com/hashicorp/memberlist/util.go +++ b/vendor/github.com/hashicorp/memberlist/util.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package memberlist import ( @@ -13,7 +16,7 @@ import ( "strings" "time" - "github.com/hashicorp/go-msgpack/codec" + "github.com/hashicorp/go-msgpack/v2/codec" "github.com/sean-/seed" ) @@ -30,7 +33,7 @@ const ( ) func init() { - seed.Init() + _, _ = seed.Init() } // Decode reverses the encode operation on a byte slice input @@ -42,10 +45,12 @@ func decode(buf []byte, out interface{}) error { } // Encode writes an encoded object to a new bytes buffer -func encode(msgType messageType, in interface{}) (*bytes.Buffer, error) { +func encode(msgType messageType, in interface{}, msgpackUseNewTimeFormat bool) (*bytes.Buffer, error) { buf := bytes.NewBuffer(nil) buf.WriteByte(uint8(msgType)) hd := codec.MsgpackHandle{} + hd.TimeNotBuiltin = !msgpackUseNewTimeFormat + enc := codec.NewEncoder(buf, &hd) err := enc.Encode(in) return buf, err @@ -76,7 +81,7 @@ func retransmitLimit(retransmitMult, n int) int { } // shuffleNodes randomly shuffles the input nodes using the Fisher-Yates shuffle -func shuffleNodes(nodes []*nodeState) { +func shuffleNodes(nodes []*NodeState) { n := len(nodes) rand.Shuffle(n, func(i, j int) { nodes[i], nodes[j] = nodes[j], nodes[i] @@ -98,7 +103,7 @@ func pushPullScale(interval time.Duration, n int) time.Duration { // moveDeadNodes moves dead and left nodes that that have not changed during the gossipToTheDeadTime interval // to the end of the slice and returns the index of the first moved node. -func moveDeadNodes(nodes []*nodeState, gossipToTheDeadTime time.Duration) int { +func moveDeadNodes(nodes []*NodeState, gossipToTheDeadTime time.Duration) int { numDead := 0 n := len(nodes) for i := 0; i < n-numDead; i++ { @@ -122,9 +127,26 @@ func moveDeadNodes(nodes []*nodeState, gossipToTheDeadTime time.Duration) int { // kRandomNodes is used to select up to k random Nodes, excluding any nodes where // the exclude function returns true. It is possible that less than k nodes are // returned. -func kRandomNodes(k int, nodes []*nodeState, exclude func(*nodeState) bool) []Node { - n := len(nodes) +func kRandomNodes(k int, nodes []*NodeState, delegate NodeSelectionDelegate, exclude func(*NodeState) bool) []Node { kNodes := make([]Node, 0, k) + + // Filter the nodes using the delegate. This allows downstream projects + // to implement custom routing logics (e.g. zone-aware gossiping). + if delegate != nil { + selected, preferred := delegate.SelectNodes(nodes) + nodes = selected + + // Add the preferred node first to guarantee it's in the result set. + if preferred != nil && k > 0 { + // Ensure it's not excluded by the filter. 
+ if exclude == nil || !exclude(preferred) { + kNodes = append(kNodes, preferred.Node) + } + } + } + + n := len(nodes) + OUTER: // Probe up to 3*n times, with large n this is not necessary // since k << n, but with small n we want search to be @@ -141,7 +163,7 @@ OUTER: // Check if we have this node already for j := 0; j < len(kNodes); j++ { - if state.Node.Name == kNodes[j].Name { + if state.Name == kNodes[j].Name { continue OUTER } } @@ -209,7 +231,7 @@ func makeCompoundMessage(msgs [][]byte) *bytes.Buffer { // Add the message lengths for _, m := range msgs { - binary.Write(buf, binary.BigEndian, uint16(len(m))) + _ = binary.Write(buf, binary.BigEndian, uint16(len(m))) } // Append the messages @@ -261,7 +283,7 @@ func decodeCompoundMessage(buf []byte) (trunc int, parts [][]byte, err error) { // compressPayload takes an opaque input buffer, compresses it // and wraps it in a compress{} message that is encoded. -func compressPayload(inp []byte) (*bytes.Buffer, error) { +func compressPayload(inp []byte, msgpackUseNewTimeFormat bool) (*bytes.Buffer, error) { var buf bytes.Buffer compressor := lzw.NewWriter(&buf, lzw.LSB, lzwLitWidth) @@ -280,7 +302,7 @@ func compressPayload(inp []byte) (*bytes.Buffer, error) { Algo: lzwAlgo, Buf: buf.Bytes(), } - return encode(compressMsg, &c) + return encode(compressMsg, &c, msgpackUseNewTimeFormat) } // decompressPayload is used to unpack an encoded compress{} @@ -299,12 +321,14 @@ func decompressPayload(msg []byte) ([]byte, error) { func decompressBuffer(c *compress) ([]byte, error) { // Verify the algorithm if c.Algo != lzwAlgo { - return nil, fmt.Errorf("Cannot decompress unknown algorithm %d", c.Algo) + return nil, fmt.Errorf("cannot decompress unknown algorithm %d", c.Algo) } // Create a uncompressor uncomp := lzw.NewReader(bytes.NewReader(c.Buf), lzw.LSB, lzwLitWidth) - defer uncomp.Close() + defer func() { + _ = uncomp.Close() + }() // Read all the data var b bytes.Buffer diff --git a/vendor/github.com/pires/go-proxyproto/header.go b/vendor/github.com/pires/go-proxyproto/header.go index 81ebeb387eb1e..209c2ccf098b7 100644 --- a/vendor/github.com/pires/go-proxyproto/header.go +++ b/vendor/github.com/pires/go-proxyproto/header.go @@ -155,11 +155,11 @@ func (header *Header) EqualsTo(otherHeader *Header) bool { if otherHeader == nil { return false } - // TLVs only exist for version 2 - if header.Version == 2 && !bytes.Equal(header.rawTLVs, otherHeader.rawTLVs) { + if header.Version != otherHeader.Version || header.Command != otherHeader.Command || header.TransportProtocol != otherHeader.TransportProtocol { return false } - if header.Version != otherHeader.Version || header.Command != otherHeader.Command || header.TransportProtocol != otherHeader.TransportProtocol { + // TLVs only exist for version 2 + if header.Version == 2 && !bytes.Equal(header.rawTLVs, otherHeader.rawTLVs) { return false } // Return early for header with LOCAL command, which contains no address information diff --git a/vendor/github.com/pires/go-proxyproto/policy.go b/vendor/github.com/pires/go-proxyproto/policy.go index 6d505be4c8037..ebef8b98eae24 100644 --- a/vendor/github.com/pires/go-proxyproto/policy.go +++ b/vendor/github.com/pires/go-proxyproto/policy.go @@ -14,6 +14,21 @@ import ( // In case an error is returned the connection is denied. type PolicyFunc func(upstream net.Addr) (Policy, error) +// ConnPolicyFunc can be used to decide whether to trust the PROXY info +// based on connection policy options. 
If set, the connecting addresses +// (remote and local) are passed in as argument. +// +// See below for the different policies. +// +// In case an error is returned the connection is denied. +type ConnPolicyFunc func(connPolicyOptions ConnPolicyOptions) (Policy, error) + +// ConnPolicyOptions contains the remote and local addresses of a connection. +type ConnPolicyOptions struct { + Upstream net.Addr + Downstream net.Addr +} + // Policy defines how a connection with a PROXY header address is treated. type Policy int @@ -170,3 +185,22 @@ func ipFromAddr(upstream net.Addr) (net.IP, error) { return upstreamIP, nil } + +// IgnoreProxyHeaderNotOnInterface retuns a ConnPolicyFunc which can be used to +// decide whether to use or ignore PROXY headers depending on the connection +// being made on a specific interface. This policy can be used when the server +// is bound to multiple interfaces but wants to allow on only one interface. +func IgnoreProxyHeaderNotOnInterface(allowedIP net.IP) ConnPolicyFunc { + return func(connOpts ConnPolicyOptions) (Policy, error) { + ip, err := ipFromAddr(connOpts.Downstream) + if err != nil { + return REJECT, err + } + + if allowedIP.Equal(ip) { + return USE, nil + } + + return IGNORE, nil + } +} diff --git a/vendor/github.com/pires/go-proxyproto/protocol.go b/vendor/github.com/pires/go-proxyproto/protocol.go index 4ce16a2765bad..93cf7c40ba87d 100644 --- a/vendor/github.com/pires/go-proxyproto/protocol.go +++ b/vendor/github.com/pires/go-proxyproto/protocol.go @@ -2,6 +2,8 @@ package proxyproto import ( "bufio" + "errors" + "fmt" "io" "net" "sync" @@ -9,22 +11,33 @@ import ( "time" ) -// DefaultReadHeaderTimeout is how long header processing waits for header to -// be read from the wire, if Listener.ReaderHeaderTimeout is not set. -// It's kept as a global variable so to make it easier to find and override, -// e.g. go build -ldflags -X "github.com/pires/go-proxyproto.DefaultReadHeaderTimeout=1s" -var DefaultReadHeaderTimeout = 10 * time.Second +var ( + // DefaultReadHeaderTimeout is how long header processing waits for header to + // be read from the wire, if Listener.ReaderHeaderTimeout is not set. + // It's kept as a global variable so to make it easier to find and override, + // e.g. go build -ldflags -X "github.com/pires/go-proxyproto.DefaultReadHeaderTimeout=1s" + DefaultReadHeaderTimeout = 10 * time.Second + + // ErrInvalidUpstream should be returned when an upstream connection address + // is not trusted, and therefore is invalid. + ErrInvalidUpstream = fmt.Errorf("proxyproto: upstream connection address not trusted for PROXY information") +) // Listener is used to wrap an underlying listener, // whose connections may be using the HAProxy Proxy Protocol. // If the connection is using the protocol, the RemoteAddr() will return // the correct client address. ReadHeaderTimeout will be applied to all // connections in order to prevent blocking operations. If no ReadHeaderTimeout -// is set, a default of 200ms will be used. This can be disabled by setting the +// is set, a default of 10s will be used. This can be disabled by setting the // timeout to < 0. +// +// Only one of Policy or ConnPolicy should be provided. If both are provided then +// a panic would occur during accept. type Listener struct { - Listener net.Listener + Listener net.Listener + // Deprecated: use ConnPolicyFunc instead. This will be removed in future release. 
Policy PolicyFunc + ConnPolicy ConnPolicyFunc ValidateHeader Validator ReadHeaderTimeout time.Duration } @@ -38,10 +51,11 @@ type Conn struct { once sync.Once readErr error conn net.Conn - Validate Validator bufReader *bufio.Reader + reader io.Reader header *Header ProxyHeaderPolicy Policy + Validate Validator readHeaderTimeout time.Duration } @@ -58,43 +72,70 @@ func ValidateHeader(v Validator) func(*Conn) { } } -// Accept waits for and returns the next connection to the listener. -func (p *Listener) Accept() (net.Conn, error) { - // Get the underlying connection - conn, err := p.Listener.Accept() - if err != nil { - return nil, err +// SetReadHeaderTimeout sets the readHeaderTimeout for a connection when passed as option to NewConn() +func SetReadHeaderTimeout(t time.Duration) func(*Conn) { + return func(c *Conn) { + if t >= 0 { + c.readHeaderTimeout = t + } } +} - proxyHeaderPolicy := USE - if p.Policy != nil { - proxyHeaderPolicy, err = p.Policy(conn.RemoteAddr()) +// Accept waits for and returns the next valid connection to the listener. +func (p *Listener) Accept() (net.Conn, error) { + for { + // Get the underlying connection + conn, err := p.Listener.Accept() if err != nil { - // can't decide the policy, we can't accept the connection - conn.Close() return nil, err } - // Handle a connection as a regular one - if proxyHeaderPolicy == SKIP { - return conn, nil + + proxyHeaderPolicy := USE + if p.Policy != nil && p.ConnPolicy != nil { + panic("only one of policy or connpolicy must be provided.") } - } + if p.Policy != nil || p.ConnPolicy != nil { + if p.Policy != nil { + proxyHeaderPolicy, err = p.Policy(conn.RemoteAddr()) + } else { + proxyHeaderPolicy, err = p.ConnPolicy(ConnPolicyOptions{ + Upstream: conn.RemoteAddr(), + Downstream: conn.LocalAddr(), + }) + } + if err != nil { + // can't decide the policy, we can't accept the connection + conn.Close() - newConn := NewConn( - conn, - WithPolicy(proxyHeaderPolicy), - ValidateHeader(p.ValidateHeader), - ) + if errors.Is(err, ErrInvalidUpstream) { + // keep listening for other connections + continue + } - // If the ReadHeaderTimeout for the listener is unset, use the default timeout. - if p.ReadHeaderTimeout == 0 { - p.ReadHeaderTimeout = DefaultReadHeaderTimeout - } + return nil, err + } + // Handle a connection as a regular one + if proxyHeaderPolicy == SKIP { + return conn, nil + } + } + + newConn := NewConn( + conn, + WithPolicy(proxyHeaderPolicy), + ValidateHeader(p.ValidateHeader), + ) + + // If the ReadHeaderTimeout for the listener is unset, use the default timeout. + if p.ReadHeaderTimeout == 0 { + p.ReadHeaderTimeout = DefaultReadHeaderTimeout + } - // Set the readHeaderTimeout of the new conn to the value of the listener - newConn.readHeaderTimeout = p.ReadHeaderTimeout + // Set the readHeaderTimeout of the new conn to the value of the listener + newConn.readHeaderTimeout = p.ReadHeaderTimeout - return newConn, nil + return newConn, nil + } } // Close closes the underlying listener. @@ -110,8 +151,15 @@ func (p *Listener) Addr() net.Addr { // NewConn is used to wrap a net.Conn that may be speaking // the proxy protocol into a proxyproto.Conn func NewConn(conn net.Conn, opts ...func(*Conn)) *Conn { + // For v1 the header length is at most 108 bytes. + // For v2 the header length is at most 52 bytes plus the length of the TLVs. + // We use 256 bytes to be safe. 
+ const bufSize = 256 + br := bufio.NewReaderSize(conn, bufSize) + pConn := &Conn{ - bufReader: bufio.NewReader(conn), + bufReader: br, + reader: io.MultiReader(br, conn), conn: conn, } @@ -133,7 +181,7 @@ func (p *Conn) Read(b []byte) (int, error) { return 0, p.readErr } - return p.bufReader.Read(b) + return p.reader.Read(b) } // Write wraps original conn.Write @@ -315,5 +363,27 @@ func (p *Conn) WriteTo(w io.Writer) (int64, error) { if p.readErr != nil { return 0, p.readErr } - return p.bufReader.WriteTo(w) + + b := make([]byte, p.bufReader.Buffered()) + if _, err := p.bufReader.Read(b); err != nil { + return 0, err // this should never as we read buffered data + } + + var n int64 + { + nn, err := w.Write(b) + n += int64(nn) + if err != nil { + return n, err + } + } + { + nn, err := io.Copy(w, p.conn) + n += nn + if err != nil { + return n, err + } + } + + return n, nil } diff --git a/vendor/github.com/sercand/kuberesolver/v6/README.md b/vendor/github.com/sercand/kuberesolver/v6/README.md index d86432afb6bda..60ff1c63c8bf4 100644 --- a/vendor/github.com/sercand/kuberesolver/v6/README.md +++ b/vendor/github.com/sercand/kuberesolver/v6/README.md @@ -1,68 +1,68 @@ -# kuberesolver - -A Grpc name resolver by using kubernetes API. -It comes with a small ~250 LOC kubernetes client to find service endpoints. Therefore it won't bloat your binaries. - - -### USAGE - -```go - -// Import the module -import "github.com/sercand/kuberesolver/v6" - -// Register kuberesolver to grpc before calling grpc.Dial -kuberesolver.RegisterInCluster() - -// it is same as -resolver.Register(kuberesolver.NewBuilder(nil /*custom kubernetes client*/ , "kubernetes")) - -// if schema is 'kubernetes' then grpc will use kuberesolver to resolve addresses -cc, err := grpc.Dial("kubernetes:///service.namespace:portname", opts...) -``` - -An url can be one of the following, [grpc naming docs](https://github.com/grpc/grpc/blob/master/doc/naming.md) - -``` -kubernetes:///service-name:8080 -kubernetes:///service-name:portname -kubernetes:///service-name.namespace:8080 -kubernetes:///service-name.namespace.svc.cluster_name -kubernetes:///service-name.namespace.svc.cluster_name:8080 - -kubernetes://namespace/service-name:8080 -kubernetes://service-name:8080/ -kubernetes://service-name.namespace:8080/ -kubernetes://service-name.namespace.svc.cluster_name -kubernetes://service-name.namespace.svc.cluster_name:8080 -``` -_* Please note that the cluster_name is not used in resolving the endpoints of a Service. It is only there to support fully qualified service names, e.g._ `test.default.svc.cluster.local`. - -### Using alternative Schema - -Use `RegisterInClusterWithSchema(schema)` instead of `RegisterInCluster` on start. - -### Client Side Load Balancing - -You need to pass grpc.WithBalancerName option to grpc on dial: - -```go -grpc.DialContext(ctx, "kubernetes:///service:grpc", grpc.WithBalancerName("round_robin"), grpc.WithInsecure()) -``` -This will create subconnections for each available service endpoints. - -### How is this different from dialing to `service.namespace:8080` - -Connecting to a service by dialing to `service.namespace:8080` uses DNS and it returns service stable IP. Therefore, gRPC doesn't know the endpoint IP addresses and it fails to reconnect to target services in case of failure. - -Kuberesolver uses kubernetes API to get and watch service endpoint IP addresses. -Since it provides and updates all available service endpoints, together with a client-side balancer you can achive zero downtime deployments. 
- -### RBAC - -You need give `GET` and `WATCH` access to the `endpointslices` if you are using RBAC in your cluster. - - -### Using With TLS - -You need to a certificate with name `service-name.namespace` in order to connect with TLS to your services. +# kuberesolver + +A Grpc name resolver by using kubernetes API. +It comes with a small ~250 LOC kubernetes client to find service endpoints. Therefore it won't bloat your binaries. + + +### USAGE + +```go + +// Import the module +import "github.com/sercand/kuberesolver/v6" + +// Register kuberesolver to grpc before calling grpc.Dial +kuberesolver.RegisterInCluster() + +// it is same as +resolver.Register(kuberesolver.NewBuilder(nil /*custom kubernetes client*/ , "kubernetes")) + +// if schema is 'kubernetes' then grpc will use kuberesolver to resolve addresses +cc, err := grpc.Dial("kubernetes:///service.namespace:portname", opts...) +``` + +An url can be one of the following, [grpc naming docs](https://github.com/grpc/grpc/blob/master/doc/naming.md) + +``` +kubernetes:///service-name:8080 +kubernetes:///service-name:portname +kubernetes:///service-name.namespace:8080 +kubernetes:///service-name.namespace.svc.cluster_name +kubernetes:///service-name.namespace.svc.cluster_name:8080 + +kubernetes://namespace/service-name:8080 +kubernetes://service-name:8080/ +kubernetes://service-name.namespace:8080/ +kubernetes://service-name.namespace.svc.cluster_name +kubernetes://service-name.namespace.svc.cluster_name:8080 +``` +_* Please note that the cluster_name is not used in resolving the endpoints of a Service. It is only there to support fully qualified service names, e.g._ `test.default.svc.cluster.local`. + +### Using alternative Schema + +Use `RegisterInClusterWithSchema(schema)` instead of `RegisterInCluster` on start. + +### Client Side Load Balancing + +You need to pass grpc.WithBalancerName option to grpc on dial: + +```go +grpc.DialContext(ctx, "kubernetes:///service:grpc", grpc.WithBalancerName("round_robin"), grpc.WithInsecure()) +``` +This will create subconnections for each available service endpoints. + +### How is this different from dialing to `service.namespace:8080` + +Connecting to a service by dialing to `service.namespace:8080` uses DNS and it returns service stable IP. Therefore, gRPC doesn't know the endpoint IP addresses and it fails to reconnect to target services in case of failure. + +Kuberesolver uses kubernetes API to get and watch service endpoint IP addresses. +Since it provides and updates all available service endpoints, together with a client-side balancer you can achive zero downtime deployments. + +### RBAC + +You need give `GET` and `WATCH` access to the `endpointslices` if you are using RBAC in your cluster. + + +### Using With TLS + +You need to a certificate with name `service-name.namespace` in order to connect with TLS to your services. diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_unix.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_unix.go index ca82f765c990a..42221f4b90a74 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_unix.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_unix.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build !windows -// +build !windows package fileutil @@ -21,7 +20,7 @@ import "os" const ( // PrivateDirMode grants owner to make/remove files inside the directory. - PrivateDirMode = 0700 + PrivateDirMode = 0o700 ) // OpenDir opens a directory for syncing. 
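Returning briefly to the go-proxyproto changes above: the new `ConnPolicyFunc` plumbing also changes `Accept` semantics, since a policy error wrapping `ErrInvalidUpstream` no longer tears down the accept loop but simply skips that connection. A small usage sketch for the new `IgnoreProxyHeaderNotOnInterface` helper; the listen address and interface IP are placeholders, not values taken from the patch.

```go
package main

import (
	"log"
	"net"

	proxyproto "github.com/pires/go-proxyproto"
)

func main() {
	ln, err := net.Listen("tcp", "0.0.0.0:8080") // placeholder address
	if err != nil {
		log.Fatal(err)
	}

	// Trust PROXY headers only for connections accepted via 10.0.0.5
	// (a placeholder interface IP); on other interfaces the header is
	// ignored and the real remote address is kept.
	pl := &proxyproto.Listener{
		Listener:   ln,
		ConnPolicy: proxyproto.IgnoreProxyHeaderNotOnInterface(net.ParseIP("10.0.0.5")),
	}
	defer pl.Close()

	for {
		conn, err := pl.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go func(c net.Conn) {
			defer c.Close()
			// When the policy returned USE, RemoteAddr reflects the
			// address carried in the PROXY header.
			log.Printf("client: %s", c.RemoteAddr())
		}(conn)
	}
}
```

Note that `Policy` and `ConnPolicy` are mutually exclusive: per the `Accept` change above, providing both causes a panic.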
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_windows.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_windows.go index 849c63c8769c9..0cb2280cd8621 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_windows.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_windows.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build windows -// +build windows package fileutil @@ -24,7 +23,7 @@ import ( const ( // PrivateDirMode grants owner to make/remove files inside the directory. - PrivateDirMode = 0777 + PrivateDirMode = 0o777 ) // OpenDir opens a directory in windows with write access for syncing. diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/filereader.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/filereader.go new file mode 100644 index 0000000000000..55248888c6034 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/filereader.go @@ -0,0 +1,60 @@ +// Copyright 2022 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fileutil + +import ( + "bufio" + "io" + "io/fs" + "os" +) + +// FileReader is a wrapper of io.Reader. It also provides file info. +type FileReader interface { + io.Reader + FileInfo() (fs.FileInfo, error) +} + +type fileReader struct { + *os.File +} + +func NewFileReader(f *os.File) FileReader { + return &fileReader{f} +} + +func (fr *fileReader) FileInfo() (fs.FileInfo, error) { + return fr.Stat() +} + +// FileBufReader is a wrapper of bufio.Reader. It also provides file info. +type FileBufReader struct { + *bufio.Reader + fi fs.FileInfo +} + +func NewFileBufReader(fr FileReader) *FileBufReader { + bufReader := bufio.NewReader(fr) + fi, err := fr.FileInfo() + if err != nil { + // This should never happen. + panic(err) + } + return &FileBufReader{bufReader, fi} +} + +func (fbr *FileBufReader) FileInfo() fs.FileInfo { + return fbr.fi +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go index e442c3c92e83e..36394a375a688 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go @@ -17,16 +17,18 @@ package fileutil import ( "fmt" "io" - "io/ioutil" + "io/fs" "os" "path/filepath" "go.uber.org/zap" + + "go.etcd.io/etcd/client/pkg/v3/verify" ) const ( // PrivateFileMode grants owner to read/write a file. - PrivateFileMode = 0600 + PrivateFileMode = 0o600 ) // IsDirWriteable checks if dir is writable by writing and removing a file @@ -36,7 +38,7 @@ func IsDirWriteable(dir string) error { if err != nil { return err } - if err := ioutil.WriteFile(f, []byte(""), PrivateFileMode); err != nil { + if err := os.WriteFile(f, []byte(""), PrivateFileMode); err != nil { return err } return os.Remove(f) @@ -44,16 +46,13 @@ func IsDirWriteable(dir string) error { // TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory // does not exists. 
TouchDirAll also ensures the given directory is writable. -func TouchDirAll(dir string) error { +func TouchDirAll(lg *zap.Logger, dir string) error { + verify.Assert(lg != nil, "nil log isn't allowed") // If path is already a directory, MkdirAll does nothing and returns nil, so, - // first check if dir exist with an expected permission mode. + // first check if dir exists with an expected permission mode. if Exist(dir) { err := CheckDirPermission(dir, PrivateDirMode) if err != nil { - lg, _ := zap.NewProduction() - if lg == nil { - lg = zap.NewExample() - } lg.Warn("check file permission", zap.Error(err)) } } else { @@ -70,8 +69,8 @@ func TouchDirAll(dir string) error { // CreateDirAll is similar to TouchDirAll but returns error // if the deepest directory was not empty. -func CreateDirAll(dir string) error { - err := TouchDirAll(dir) +func CreateDirAll(lg *zap.Logger, dir string) error { + err := TouchDirAll(lg, dir) if err == nil { var ns []string ns, err = ReadDir(dir) @@ -126,7 +125,7 @@ func CheckDirPermission(dir string, perm os.FileMode) error { if !Exist(dir) { return fmt.Errorf("directory %q empty, cannot check permission", dir) } - //check the existing permission on the directory + // check the existing permission on the directory dirInfo, err := os.Stat(dir) if err != nil { return err @@ -161,7 +160,6 @@ func RemoveMatchFile(lg *zap.Logger, dir string, matchFunc func(fileName string) lg.Error("remove file failed", zap.String("file", file), zap.Error(err)) - continue } } } @@ -170,3 +168,16 @@ func RemoveMatchFile(lg *zap.Logger, dir string, matchFunc func(fileName string) } return nil } + +// ListFiles lists files if matchFunc is true on an existing dir +// Returns error if the dir does not exist +func ListFiles(dir string, matchFunc func(fileName string) bool) ([]string, error) { + var files []string + err := filepath.Walk(dir, func(path string, info fs.FileInfo, err error) error { + if matchFunc(path) { + files = append(files, path) + } + return nil + }) + return files, err +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock.go index 338627f43c88d..dd2fa545d227c 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock.go @@ -19,8 +19,6 @@ import ( "os" ) -var ( - ErrLocked = errors.New("fileutil: file already locked") -) +var ErrLocked = errors.New("fileutil: file already locked") type LockedFile struct{ *os.File } diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_flock.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_flock.go index dcdf226cdbfde..178c987a4a396 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_flock.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_flock.go @@ -13,11 +13,11 @@ // limitations under the License. 
//go:build !windows && !plan9 && !solaris -// +build !windows,!plan9,!solaris package fileutil import ( + "errors" "os" "syscall" ) @@ -29,7 +29,7 @@ func flockTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, err } if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil { f.Close() - if err == syscall.EWOULDBLOCK { + if errors.Is(err, syscall.EWOULDBLOCK) { err = ErrLocked } return nil, err diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_linux.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_linux.go index d8952cc481b05..609ac397849b9 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_linux.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_linux.go @@ -13,11 +13,11 @@ // limitations under the License. //go:build linux -// +build linux package fileutil import ( + "errors" "fmt" "io" "os" @@ -59,13 +59,13 @@ func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { func ofdTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { f, err := os.OpenFile(path, flag, perm) if err != nil { - return nil, fmt.Errorf("ofdTryLockFile failed to open %q (%v)", path, err) + return nil, fmt.Errorf("ofdTryLockFile failed to open %q (%w)", path, err) } flock := wrlck if err = syscall.FcntlFlock(f.Fd(), unix.F_OFD_SETLK, &flock); err != nil { f.Close() - if err == syscall.EWOULDBLOCK { + if errors.Is(err, syscall.EWOULDBLOCK) { err = ErrLocked } return nil, err @@ -80,7 +80,7 @@ func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { func ofdLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { f, err := os.OpenFile(path, flag, perm) if err != nil { - return nil, fmt.Errorf("ofdLockFile failed to open %q (%v)", path, err) + return nil, fmt.Errorf("ofdLockFile failed to open %q (%w)", path, err) } flock := wrlck diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_solaris.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_solaris.go index 683cc1db9c417..2e892fecc6546 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_solaris.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_solaris.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build solaris -// +build solaris package fileutil diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_unix.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_unix.go index d89027e1fad60..05db53674105a 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_unix.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_unix.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build !windows && !plan9 && !solaris && !linux -// +build !windows,!plan9,!solaris,!linux package fileutil diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_windows.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_windows.go index 5cbf2bc3d5e86..51010bdf81ce7 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_windows.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_windows.go @@ -13,7 +13,6 @@ // limitations under the License. 
//go:build windows -// +build windows package fileutil @@ -22,31 +21,18 @@ import ( "fmt" "os" "syscall" - "unsafe" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - procLockFileEx = modkernel32.NewProc("LockFileEx") - errLocked = errors.New("the process cannot access the file because another process has locked a portion of the file") + "golang.org/x/sys/windows" ) -const ( - // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx - LOCKFILE_EXCLUSIVE_LOCK = 2 - LOCKFILE_FAIL_IMMEDIATELY = 1 - - // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx - errLockViolation syscall.Errno = 0x21 -) +var errLocked = errors.New("the process cannot access the file because another process has locked a portion of the file") func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { f, err := open(path, flag, perm) if err != nil { return nil, err } - if err := lockFile(syscall.Handle(f.Fd()), LOCKFILE_FAIL_IMMEDIATELY); err != nil { + if err := lockFile(windows.Handle(f.Fd()), windows.LOCKFILE_FAIL_IMMEDIATELY); err != nil { f.Close() return nil, err } @@ -58,7 +44,7 @@ func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { if err != nil { return nil, err } - if err := lockFile(syscall.Handle(f.Fd()), 0); err != nil { + if err := lockFile(windows.Handle(f.Fd()), 0); err != nil { f.Close() return nil, err } @@ -67,7 +53,7 @@ func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { func open(path string, flag int, perm os.FileMode) (*os.File, error) { if path == "" { - return nil, fmt.Errorf("cannot open empty filename") + return nil, errors.New("cannot open empty filename") } var access uint32 switch flag { @@ -95,32 +81,17 @@ func open(path string, flag int, perm os.FileMode) (*os.File, error) { return os.NewFile(uintptr(fd), path), nil } -func lockFile(fd syscall.Handle, flags uint32) error { - var flag uint32 = LOCKFILE_EXCLUSIVE_LOCK - flag |= flags - if fd == syscall.InvalidHandle { +func lockFile(fd windows.Handle, flags uint32) error { + if fd == windows.InvalidHandle { return nil } - err := lockFileEx(fd, flag, 1, 0, &syscall.Overlapped{}) + err := windows.LockFileEx(fd, flags|windows.LOCKFILE_EXCLUSIVE_LOCK, 0, 1, 0, &windows.Overlapped{}) if err == nil { return nil } else if err.Error() == errLocked.Error() { return ErrLocked - } else if err != errLockViolation { + } else if err != windows.ERROR_LOCK_VIOLATION { return err } return nil } - -func lockFileEx(h syscall.Handle, flags, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { - var reserved uint32 = 0 - r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return err -} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate.go index c747b7cf81f93..aadbff7131d7f 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate.go @@ -19,9 +19,9 @@ import ( "os" ) -// Preallocate tries to allocate the space for given -// file. This operation is only supported on linux by a -// few filesystems (btrfs, ext4, etc.). +// Preallocate tries to allocate the space for given file. 
This +// operation is only supported on darwin and linux by a few +// filesystems (APFS, btrfs, ext4, etc.). // If the operation is unsupported, no error will be returned. // Otherwise, the error encountered will be returned. func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error { diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_darwin.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_darwin.go index caab143dd301e..72430ec273b8f 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_darwin.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_darwin.go @@ -13,11 +13,11 @@ // limitations under the License. //go:build darwin -// +build darwin package fileutil import ( + "errors" "os" "syscall" @@ -40,7 +40,7 @@ func preallocFixed(f *os.File, sizeInBytes int64) error { Length: sizeInBytes, } err := unix.FcntlFstore(f.Fd(), unix.F_PREALLOCATE, fstore) - if err == nil || err == unix.ENOTSUP { + if err == nil || errors.Is(err, unix.ENOTSUP) { return nil } diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unix.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unix.go index ebb8207c3408d..b0a8166ae144f 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unix.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unix.go @@ -13,11 +13,11 @@ // limitations under the License. //go:build linux -// +build linux package fileutil import ( + "errors" "os" "syscall" ) @@ -26,10 +26,10 @@ func preallocExtend(f *os.File, sizeInBytes int64) error { // use mode = 0 to change size err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes) if err != nil { - errno, ok := err.(syscall.Errno) + var errno syscall.Errno // not supported; fallback // fallocate EINTRs frequently in some environments; fallback - if ok && (errno == syscall.ENOTSUP || errno == syscall.EINTR) { + if errors.As(err, &errno) && (errno == syscall.ENOTSUP || errno == syscall.EINTR) { return preallocExtendTrunc(f, sizeInBytes) } } @@ -40,9 +40,9 @@ func preallocFixed(f *os.File, sizeInBytes int64) error { // use mode = 1 to keep size; see FALLOC_FL_KEEP_SIZE err := syscall.Fallocate(int(f.Fd()), 1, 0, sizeInBytes) if err != nil { - errno, ok := err.(syscall.Errno) + var errno syscall.Errno // treat not supported as nil error - if ok && errno == syscall.ENOTSUP { + if errors.As(err, &errno) && errno == syscall.ENOTSUP { return nil } } diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unsupported.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unsupported.go index 2c46dd4907553..e7fd937a43696 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unsupported.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unsupported.go @@ -13,7 +13,6 @@ // limitations under the License. 
//go:build !linux && !darwin -// +build !linux,!darwin package fileutil diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go index e8ac0ca6f58a2..026ea03230fa3 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go @@ -17,7 +17,6 @@ package fileutil import ( "os" "path/filepath" - "sort" "strings" "time" @@ -25,61 +24,74 @@ import ( ) func PurgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error { - return purgeFile(lg, dirname, suffix, max, interval, stop, nil, nil) + return purgeFile(lg, dirname, suffix, max, interval, stop, nil, nil, true) } func PurgeFileWithDoneNotify(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) (<-chan struct{}, <-chan error) { doneC := make(chan struct{}) - errC := purgeFile(lg, dirname, suffix, max, interval, stop, nil, doneC) + errC := purgeFile(lg, dirname, suffix, max, interval, stop, nil, doneC, true) + return doneC, errC +} + +func PurgeFileWithoutFlock(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) (<-chan struct{}, <-chan error) { + doneC := make(chan struct{}) + errC := purgeFile(lg, dirname, suffix, max, interval, stop, nil, doneC, false) return doneC, errC } // purgeFile is the internal implementation for PurgeFile which can post purged files to purgec if non-nil. // if donec is non-nil, the function closes it to notify its exit. -func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string, donec chan<- struct{}) <-chan error { +func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string, donec chan<- struct{}, flock bool) <-chan error { if lg == nil { lg = zap.NewNop() } errC := make(chan error, 1) + lg.Info("started to purge file", + zap.String("dir", dirname), + zap.String("suffix", suffix), + zap.Uint("max", max), + zap.Duration("interval", interval)) + go func() { if donec != nil { defer close(donec) } for { - fnames, err := ReadDir(dirname) + fnamesWithSuffix, err := readDirWithSuffix(dirname, suffix) if err != nil { errC <- err return } - newfnames := make([]string, 0) - for _, fname := range fnames { - if strings.HasSuffix(fname, suffix) { - newfnames = append(newfnames, fname) - } - } - sort.Strings(newfnames) - fnames = newfnames - for len(newfnames) > int(max) { - f := filepath.Join(dirname, newfnames[0]) - l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode) - if err != nil { - break + nPurged := 0 + for nPurged < len(fnamesWithSuffix)-int(max) { + f := filepath.Join(dirname, fnamesWithSuffix[nPurged]) + var l *LockedFile + if flock { + l, err = TryLockFile(f, os.O_WRONLY, PrivateFileMode) + if err != nil { + lg.Warn("failed to lock file", zap.String("path", f), zap.Error(err)) + break + } } if err = os.Remove(f); err != nil { + lg.Error("failed to remove file", zap.String("path", f), zap.Error(err)) errC <- err return } - if err = l.Close(); err != nil { - lg.Warn("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err)) - errC <- err - return + if flock { + if err = l.Close(); err != nil { + lg.Error("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err)) + errC <- err + return + } } lg.Info("purged", zap.String("path", f)) - 
newfnames = newfnames[1:] + nPurged++ } + if purgec != nil { - for i := 0; i < len(fnames)-len(newfnames); i++ { - purgec <- fnames[i] + for i := 0; i < nPurged; i++ { + purgec <- fnamesWithSuffix[i] } } select { @@ -91,3 +103,18 @@ func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval }() return errC } + +func readDirWithSuffix(dirname string, suffix string) ([]string, error) { + fnames, err := ReadDir(dirname) + if err != nil { + return nil, err + } + // filter in place (ref. https://go.dev/wiki/SliceTricks#filtering-without-allocating) + fnamesWithSuffix := fnames[:0] + for _, fname := range fnames { + if strings.HasSuffix(fname, suffix) { + fnamesWithSuffix = append(fnamesWithSuffix, fname) + } + } + return fnamesWithSuffix, nil +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync.go index 0a0855309e9ef..670d01fadcc27 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build !linux && !darwin -// +build !linux,!darwin package fileutil diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_darwin.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_darwin.go index 1923b276ea072..7affa78ea6404 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_darwin.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_darwin.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build darwin -// +build darwin package fileutil diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_linux.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_linux.go index b9398c23f9476..a3172382e5ae9 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_linux.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_linux.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build linux -// +build linux package fileutil diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/log_format.go b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/log_format.go new file mode 100644 index 0000000000000..286d385ba3990 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/log_format.go @@ -0,0 +1,42 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import "fmt" + +const ( + JSONLogFormat = "json" + ConsoleLogFormat = "console" + //revive:disable:var-naming + // Deprecated: Please use JSONLogFormat. + JsonLogFormat = JSONLogFormat + //revive:enable:var-naming +) + +var DefaultLogFormat = JSONLogFormat + +// ConvertToZapFormat converts and validates a log format string.
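+//
+// Illustrative behaviour (editor's sketch, not part of the upstream file):
+//
+//	f, err := ConvertToZapFormat("console") // "console", nil
+//	f, err = ConvertToZapFormat("")         // DefaultLogFormat ("json"), nil
+//	f, err = ConvertToZapFormat("xml")      // "", "unknown log format" error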
+func ConvertToZapFormat(format string) (string, error) { + switch format { + case ConsoleLogFormat: + return ConsoleLogFormat, nil + case JSONLogFormat: + return JSONLogFormat, nil + case "": + return DefaultLogFormat, nil + default: + return "", fmt.Errorf("unknown log format: %s, supported values json, console", format) + } +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go index d7fd0d90dbd1c..befa5758475ec 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go @@ -15,7 +15,8 @@ package logutil import ( - "sort" + "slices" + "time" "go.uber.org/zap" "go.uber.org/zap/zapcore" @@ -42,19 +43,24 @@ var DefaultZapLoggerConfig = zap.Config{ Thereafter: 100, }, - Encoding: "json", + Encoding: DefaultLogFormat, // copied from "zap.NewProductionEncoderConfig" with some updates EncoderConfig: zapcore.EncoderConfig{ - TimeKey: "ts", - LevelKey: "level", - NameKey: "logger", - CallerKey: "caller", - MessageKey: "msg", - StacktraceKey: "stacktrace", - LineEnding: zapcore.DefaultLineEnding, - EncodeLevel: zapcore.LowercaseLevelEncoder, - EncodeTime: zapcore.ISO8601TimeEncoder, + TimeKey: "ts", + LevelKey: "level", + NameKey: "logger", + CallerKey: "caller", + MessageKey: "msg", + StacktraceKey: "stacktrace", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.LowercaseLevelEncoder, + + // Custom EncodeTime function to ensure we match format and precision of historic capnslog timestamps + EncodeTime: func(t time.Time, enc zapcore.PrimitiveArrayEncoder) { + enc.AppendString(t.Format("2006-01-02T15:04:05.000000Z0700")) + }, + EncodeDuration: zapcore.StringDurationEncoder, EncodeCaller: zapcore.ShortCallerEncoder, }, @@ -66,37 +72,22 @@ var DefaultZapLoggerConfig = zap.Config{ // MergeOutputPaths merges logging output paths, resolving conflicts. func MergeOutputPaths(cfg zap.Config) zap.Config { - outputs := make(map[string]struct{}) - for _, v := range cfg.OutputPaths { - outputs[v] = struct{}{} - } - outputSlice := make([]string, 0) - if _, ok := outputs["/dev/null"]; ok { - // "/dev/null" to discard all - outputSlice = []string{"/dev/null"} - } else { - for k := range outputs { - outputSlice = append(outputSlice, k) - } - } - cfg.OutputPaths = outputSlice - sort.Strings(cfg.OutputPaths) + cfg.OutputPaths = mergePaths(cfg.OutputPaths) + cfg.ErrorOutputPaths = mergePaths(cfg.ErrorOutputPaths) + return cfg +} - errOutputs := make(map[string]struct{}) - for _, v := range cfg.ErrorOutputPaths { - errOutputs[v] = struct{}{} +func mergePaths(old []string) []string { + if len(old) == 0 { + // the original implementation ensures the result is non-nil + return []string{} } - errOutputSlice := make([]string, 0) - if _, ok := errOutputs["/dev/null"]; ok { - // "/dev/null" to discard all - errOutputSlice = []string{"/dev/null"} - } else { - for k := range errOutputs { - errOutputSlice = append(errOutputSlice, k) - } + // use "/dev/null" to discard all + if slices.Contains(old, "/dev/null") { + return []string{"/dev/null"} } - cfg.ErrorOutputPaths = errOutputSlice - sort.Strings(cfg.ErrorOutputPaths) - - return cfg + // clone a new one; don't modify the original, in case it matters. 
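+ // e.g. (editor's note): mergePaths([]string{"stderr", "./etcd.log", "stderr"})
+ // yields []string{"./etcd.log", "stderr"}: cloned, sorted, then de-duplicated.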
+ dup := slices.Clone(old) + slices.Sort(dup) + return slices.Compact(dup) } diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap_journal.go b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap_journal.go index 9daa3e0aab1d5..06dc40dacd985 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap_journal.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap_journal.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build !windows -// +build !windows package logutil @@ -25,10 +24,10 @@ import ( "os" "path/filepath" - "go.etcd.io/etcd/client/pkg/v3/systemd" - "github.com/coreos/go-systemd/v22/journal" "go.uber.org/zap/zapcore" + + "go.etcd.io/etcd/client/pkg/v3/systemd" ) // NewJournalWriter wraps "io.Writer" to redirect log output diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/cipher_suites.go b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/cipher_suites.go index f278a61f8a04b..e1f21755d4b71 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/cipher_suites.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/cipher_suites.go @@ -14,7 +14,10 @@ package tlsutil -import "crypto/tls" +import ( + "crypto/tls" + "fmt" +) // GetCipherSuite returns the corresponding cipher suite, // and boolean value if it is supported. @@ -37,3 +40,17 @@ func GetCipherSuite(s string) (uint16, bool) { } return 0, false } + +// GetCipherSuites returns list of corresponding cipher suite IDs. +func GetCipherSuites(ss []string) ([]uint16, error) { + cs := make([]uint16, len(ss)) + for i, s := range ss { + var ok bool + cs[i], ok = GetCipherSuite(s) + if !ok { + return nil, fmt.Errorf("unexpected TLS cipher suite %q", s) + } + } + + return cs, nil +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/tlsutil.go b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/tlsutil.go index 3a5aef089a784..0f79865e805ed 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/tlsutil.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/tlsutil.go @@ -18,7 +18,7 @@ import ( "crypto/tls" "crypto/x509" "encoding/pem" - "io/ioutil" + "os" ) // NewCertPool creates x509 certPool with provided CA files. @@ -26,7 +26,7 @@ func NewCertPool(CAFiles []string) (*x509.CertPool, error) { certPool := x509.NewCertPool() for _, CAFile := range CAFiles { - pemByte, err := ioutil.ReadFile(CAFile) + pemByte, err := os.ReadFile(CAFile) if err != nil { return nil, err } @@ -51,12 +51,12 @@ func NewCertPool(CAFiles []string) (*x509.CertPool, error) { // NewCert generates TLS cert by using the given cert,key and parse function. func NewCert(certfile, keyfile string, parseFunc func([]byte, []byte) (tls.Certificate, error)) (*tls.Certificate, error) { - cert, err := ioutil.ReadFile(certfile) + cert, err := os.ReadFile(certfile) if err != nil { return nil, err } - key, err := ioutil.ReadFile(keyfile) + key, err := os.ReadFile(keyfile) if err != nil { return nil, err } diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/versions.go b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/versions.go new file mode 100644 index 0000000000000..ffcecd8c670f8 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/versions.go @@ -0,0 +1,47 @@ +// Copyright 2023 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tlsutil + +import ( + "crypto/tls" + "fmt" +) + +type TLSVersion string + +// Constants for TLS versions. +const ( + TLSVersionDefault TLSVersion = "" + TLSVersion12 TLSVersion = "TLS1.2" + TLSVersion13 TLSVersion = "TLS1.3" +) + +// GetTLSVersion returns the corresponding tls.Version or error. +func GetTLSVersion(version string) (uint16, error) { + var v uint16 + + switch version { + case string(TLSVersionDefault): + v = 0 // 0 means let Go decide. + case string(TLSVersion12): + v = tls.VersionTLS12 + case string(TLSVersion13): + v = tls.VersionTLS13 + default: + return 0, fmt.Errorf("unexpected TLS version %q (must be one of: TLS1.2, TLS1.3)", version) + } + + return v, nil +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener.go index 4ff8e7f0010ca..d43ac4f078aba 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener.go @@ -16,31 +16,35 @@ package transport import ( "crypto/tls" + "errors" "fmt" "net" "time" ) -type keepAliveConn interface { - SetKeepAlive(bool) error - SetKeepAlivePeriod(d time.Duration) error -} - // NewKeepAliveListener returns a listener that listens on the given address. // Be careful when wrap around KeepAliveListener with another Listener if TLSInfo is not nil. // Some pkgs (like go/http) might expect Listener to return TLSConn type to start TLS handshake. // http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html +// +// Note(ahrtr): +// only `net.TCPConn` supports `SetKeepAlive` and `SetKeepAlivePeriod` +// by default, so if you want to wrap multiple layers of net.Listener, +// the `keepaliveListener` should be the one which is closest to the +// original `net.Listener` implementation, namely `TCPListener`. 
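+//
+// A minimal usage sketch (editor's illustration; error handling elided):
+//
+//	ln, _ := net.Listen("tcp", "127.0.0.1:0")
+//	kln, err := NewKeepAliveListener(ln, "http", nil)
+//	// connections returned by kln.Accept() come back with TCP keepalive
+//	// enabled and a 30-second keepalive period.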
func NewKeepAliveListener(l net.Listener, scheme string, tlscfg *tls.Config) (net.Listener, error) { + kal := &keepaliveListener{ + Listener: l, + } + if scheme == "https" { if tlscfg == nil { - return nil, fmt.Errorf("cannot listen on TLS for given listener: KeyFile and CertFile are not presented") + return nil, errors.New("cannot listen on TLS for given listener: KeyFile and CertFile are not presented") } - return newTLSKeepaliveListener(l, tlscfg), nil + return newTLSKeepaliveListener(kal, tlscfg), nil } - return &keepaliveListener{ - Listener: l, - }, nil + return kal, nil } type keepaliveListener struct{ net.Listener } @@ -50,13 +54,38 @@ func (kln *keepaliveListener) Accept() (net.Conn, error) { if err != nil { return nil, err } - kac := c.(keepAliveConn) + + kac, err := createKeepaliveConn(c) + if err != nil { + return nil, fmt.Errorf("create keepalive connection failed, %w", err) + } // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl // default on linux: 30 + 8 * 30 // default on osx: 30 + 8 * 75 - kac.SetKeepAlive(true) - kac.SetKeepAlivePeriod(30 * time.Second) - return c, nil + if err := kac.SetKeepAlive(true); err != nil { + return nil, fmt.Errorf("SetKeepAlive failed, %w", err) + } + if err := kac.SetKeepAlivePeriod(30 * time.Second); err != nil { + return nil, fmt.Errorf("SetKeepAlivePeriod failed, %w", err) + } + return kac, nil +} + +func createKeepaliveConn(c net.Conn) (*keepAliveConn, error) { + tcpc, ok := c.(*net.TCPConn) + if !ok { + return nil, ErrNotTCP + } + return &keepAliveConn{tcpc}, nil +} + +type keepAliveConn struct { + *net.TCPConn +} + +// SetKeepAlive sets keepalive +func (l *keepAliveConn) SetKeepAlive(doKeepAlive bool) error { + return l.TCPConn.SetKeepAlive(doKeepAlive) } // A tlsKeepaliveListener implements a network listener (net.Listener) for TLS connections. @@ -67,22 +96,17 @@ type tlsKeepaliveListener struct { // Accept waits for and returns the next incoming TLS connection. // The returned connection c is a *tls.Conn. -func (l *tlsKeepaliveListener) Accept() (c net.Conn, err error) { - c, err = l.Listener.Accept() +func (l *tlsKeepaliveListener) Accept() (net.Conn, error) { + c, err := l.Listener.Accept() if err != nil { - return + return nil, err } - kac := c.(keepAliveConn) - // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl - // default on linux: 30 + 8 * 30 - // default on osx: 30 + 8 * 75 - kac.SetKeepAlive(true) - kac.SetKeepAlivePeriod(30 * time.Second) + c = tls.Server(c, l.config) return c, nil } -// NewListener creates a Listener which accepts connections from an inner +// newTLSKeepaliveListener creates a Listener which accepts connections from an inner // Listener and wraps each connection with Server. // The configuration config must be non-nil and must have // at least one certificate. diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener_openbsd.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener_openbsd.go new file mode 100644 index 0000000000000..024c6c23639f0 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener_openbsd.go @@ -0,0 +1,26 @@ +// Copyright 2023 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build openbsd + +package transport + +import "time" + +// SetKeepAlivePeriod sets keepalive period +func (l *keepAliveConn) SetKeepAlivePeriod(d time.Duration) error { + // OpenBSD has no user-settable per-socket TCP keepalive options. + // Refer to https://github.com/etcd-io/etcd/issues/15811. + return nil +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener_unix.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener_unix.go new file mode 100644 index 0000000000000..08061f7267b21 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener_unix.go @@ -0,0 +1,24 @@ +// Copyright 2023 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !openbsd + +package transport + +import "time" + +// SetKeepAlivePeriod sets keepalive period +func (l *keepAliveConn) SetKeepAlivePeriod(d time.Duration) error { + return l.TCPConn.SetKeepAlivePeriod(d) +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/limit_listen.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/limit_listen.go index 930c542066f81..bf4c4e104a277 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/limit_listen.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/limit_listen.go @@ -23,9 +23,7 @@ import ( "time" ) -var ( - ErrNotTCP = errors.New("only tcp connections have keepalive") -) +var ErrNotTCP = errors.New("only tcp connections have keepalive") // LimitListener returns a Listener that accepts at most n simultaneous // connections from the provided Listener. @@ -63,6 +61,9 @@ func (l *limitListenerConn) Close() error { return err } +// SetKeepAlive sets keepalive +// +// Deprecated: use (*keepAliveConn) SetKeepAlive instead. func (l *limitListenerConn) SetKeepAlive(doKeepAlive bool) error { tcpc, ok := l.Conn.(*net.TCPConn) if !ok { @@ -71,6 +72,9 @@ func (l *limitListenerConn) SetKeepAlive(doKeepAlive bool) error { return tcpc.SetKeepAlive(doKeepAlive) } +// SetKeepAlivePeriod sets keepalive period +// +// Deprecated: use (*keepAliveConn) SetKeepAlivePeriod instead. 
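+//
+// (Editor's note: unlike this method, which type-asserts l.Conn on every
+// call, keepAliveConn embeds *net.TCPConn and calls it directly.)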
func (l *limitListenerConn) SetKeepAlivePeriod(d time.Duration) error { tcpc, ok := l.Conn.(*net.TCPConn) if !ok { diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go index 992c773eaacc9..9c2d29ba998ec 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go @@ -32,10 +32,11 @@ import ( "strings" "time" + "go.uber.org/zap" + "go.etcd.io/etcd/client/pkg/v3/fileutil" "go.etcd.io/etcd/client/pkg/v3/tlsutil" - - "go.uber.org/zap" + "go.etcd.io/etcd/client/pkg/v3/verify" ) // NewListener creates a new listner. @@ -43,7 +44,7 @@ func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err err return newListener(addr, scheme, WithTLSInfo(tlsinfo)) } -// NewListenerWithOpts creates a new listener which accpets listener options. +// NewListenerWithOpts creates a new listener which accepts listener options. func NewListenerWithOpts(addr, scheme string, opts ...ListenerOption) (net.Listener, error) { return newListener(addr, scheme, opts...) } @@ -59,16 +60,12 @@ func newListener(addr, scheme string, opts ...ListenerOption) (net.Listener, err switch { case lnOpts.IsSocketOpts(): // new ListenConfig with socket options. - config, err := newListenConfig(lnOpts.socketOpts) - if err != nil { - return nil, err - } - lnOpts.ListenConfig = config + lnOpts.ListenConfig = newListenConfig(lnOpts.socketOpts) // check for timeout fallthrough case lnOpts.IsTimeout(), lnOpts.IsSocketOpts(): // timeout listener with socket options. - ln, err := lnOpts.ListenConfig.Listen(context.TODO(), "tcp", addr) + ln, err := newKeepAliveListener(&lnOpts.ListenConfig, addr) if err != nil { return nil, err } @@ -78,7 +75,7 @@ func newListener(addr, scheme string, opts ...ListenerOption) (net.Listener, err writeTimeout: lnOpts.writeTimeout, } case lnOpts.IsTimeout(): - ln, err := net.Listen("tcp", addr) + ln, err := newKeepAliveListener(nil, addr) if err != nil { return nil, err } @@ -88,7 +85,7 @@ func newListener(addr, scheme string, opts ...ListenerOption) (net.Listener, err writeTimeout: lnOpts.writeTimeout, } default: - ln, err := net.Listen("tcp", addr) + ln, err := newKeepAliveListener(nil, addr) if err != nil { return nil, err } @@ -102,6 +99,22 @@ func newListener(addr, scheme string, opts ...ListenerOption) (net.Listener, err return wrapTLS(scheme, lnOpts.tlsInfo, lnOpts.Listener) } +func newKeepAliveListener(cfg *net.ListenConfig, addr string) (net.Listener, error) { + var ln net.Listener + var err error + + if cfg != nil { + ln, err = cfg.Listen(context.TODO(), "tcp", addr) + } else { + ln, err = net.Listen("tcp", addr) + } + if err != nil { + return nil, err + } + + return NewKeepAliveListener(ln, "tcp", nil) +} + func wrapTLS(scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) { if scheme != "https" && scheme != "unixs" { return l, nil @@ -112,7 +125,7 @@ func wrapTLS(scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, err return newTLSListener(l, tlsinfo, checkSAN) } -func newListenConfig(sopts *SocketOpts) (net.ListenConfig, error) { +func newListenConfig(sopts *SocketOpts) net.ListenConfig { lc := net.ListenConfig{} if sopts != nil { ctls := getControls(sopts) @@ -120,7 +133,7 @@ func newListenConfig(sopts *SocketOpts) (net.ListenConfig, error) { lc.Control = ctls.Control } } - return lc, nil + return lc } type TLSInfo struct { @@ -152,6 +165,14 @@ type TLSInfo struct { // Note that cipher suites are 
prioritized in the given order. CipherSuites []uint16 + // MinVersion is the minimum TLS version that is acceptable. + // If not set, the minimum version is TLS 1.2. + MinVersion uint16 + + // MaxVersion is the maximum TLS version that is acceptable. + // If not set, the default used by Go is selected (see tls.Config.MaxVersion). + MaxVersion uint16 + selfCert bool // parseFunc exists to simplify testing. Typically, parseFunc @@ -159,12 +180,23 @@ type TLSInfo struct { parseFunc func([]byte, []byte) (tls.Certificate, error) // AllowedCN is a CN which must be provided by a client. + // + // Deprecated: use AllowedCNs instead. AllowedCN string // AllowedHostname is an IP address or hostname that must match the TLS // certificate provided by a client. + // + // Deprecated: use AllowedHostnames instead. AllowedHostname string + // AllowedCNs is a list of acceptable CNs which must be provided by a client. + AllowedCNs []string + + // AllowedHostnames is a list of acceptable IP addresses or hostnames that must match the + // TLS certificate provided by a client. + AllowedHostnames []string + // Logger logs TLS errors. // If nil, all logs are discarded. Logger *zap.Logger @@ -172,6 +204,9 @@ type TLSInfo struct { // EmptyCN indicates that the cert must have empty CN. // If true, ClientConfig() will return an error for a cert with non empty CN. EmptyCN bool + + // LocalAddr is the local IP address to use when communicating with a peer. + LocalAddr string } func (info TLSInfo) String() string { @@ -182,34 +217,35 @@ func (info TLSInfo) Empty() bool { return info.CertFile == "" && info.KeyFile == "" } -func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertValidity uint, additionalUsages ...x509.ExtKeyUsage) (info TLSInfo, err error) { - info.Logger = lg +func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertValidity uint, additionalUsages ...x509.ExtKeyUsage) (TLSInfo, error) { + verify.Assert(lg != nil, "nil log isn't allowed") + + var err error + info := TLSInfo{Logger: lg} if selfSignedCertValidity == 0 { - err = fmt.Errorf("selfSignedCertValidity is invalid,it should be greater than 0") + err = errors.New("selfSignedCertValidity is invalid,it should be greater than 0") info.Logger.Warn( "cannot generate cert", zap.Error(err), ) - return + return info, err } - err = fileutil.TouchDirAll(dirpath) + err = fileutil.TouchDirAll(lg, dirpath) if err != nil { - if info.Logger != nil { - info.Logger.Warn( - "cannot create cert directory", - zap.Error(err), - ) - } - return + info.Logger.Warn( + "cannot create cert directory", + zap.Error(err), + ) + return info, err } certPath, err := filepath.Abs(filepath.Join(dirpath, "cert.pem")) if err != nil { - return + return info, err } keyPath, err := filepath.Abs(filepath.Join(dirpath, "key.pem")) if err != nil { - return + return info, err } _, errcert := os.Stat(certPath) _, errkey := os.Stat(keyPath) @@ -219,19 +255,17 @@ func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertVali info.ClientCertFile = certPath info.ClientKeyFile = keyPath info.selfCert = true - return + return info, err } serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) if err != nil { - if info.Logger != nil { - info.Logger.Warn( - "cannot generate random number", - zap.Error(err), - ) - } - return + info.Logger.Warn( + "cannot generate random number", + zap.Error(err), + ) + return info, err } tmpl := x509.Certificate{ @@ -240,17 +274,16 @@ func 
SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertVali NotBefore: time.Now(), NotAfter: time.Now().Add(time.Duration(selfSignedCertValidity) * 365 * (24 * time.Hour)), - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCRLSign, ExtKeyUsage: append([]x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, additionalUsages...), BasicConstraintsValid: true, + IsCA: true, } - if info.Logger != nil { - info.Logger.Warn( - "automatically generate certificates", - zap.Time("certificate-validity-bound-not-after", tmpl.NotAfter), - ) - } + info.Logger.Warn( + "automatically generate certificates", + zap.Time("certificate-validity-bound-not-after", tmpl.NotAfter), + ) for _, host := range hosts { h, _, _ := net.SplitHostPort(host) @@ -263,24 +296,20 @@ func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertVali priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) if err != nil { - if info.Logger != nil { - info.Logger.Warn( - "cannot generate ECDSA key", - zap.Error(err), - ) - } - return + info.Logger.Warn( + "cannot generate ECDSA key", + zap.Error(err), + ) + return info, err } derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv) if err != nil { - if info.Logger != nil { - info.Logger.Warn( - "cannot generate x509 certificate", - zap.Error(err), - ) - } - return + info.Logger.Warn( + "cannot generate x509 certificate", + zap.Error(err), + ) + return info, err } certOut, err := os.Create(certPath) @@ -290,34 +319,29 @@ func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertVali zap.String("path", certPath), zap.Error(err), ) - return + return info, err } pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) certOut.Close() - if info.Logger != nil { - info.Logger.Info("created cert file", zap.String("path", certPath)) - } + + info.Logger.Info("created cert file", zap.String("path", certPath)) b, err := x509.MarshalECPrivateKey(priv) if err != nil { - return + return info, err } - keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600) if err != nil { - if info.Logger != nil { - info.Logger.Warn( - "cannot key file", - zap.String("path", keyPath), - zap.Error(err), - ) - } - return + info.Logger.Warn( + "cannot key file", + zap.String("path", keyPath), + zap.Error(err), + ) + return info, err } pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}) keyOut.Close() - if info.Logger != nil { - info.Logger.Info("created key file", zap.String("path", keyPath)) - } + info.Logger.Info("created key file", zap.String("path", keyPath)) return SelfCert(lg, dirpath, hosts, selfSignedCertValidity) } @@ -326,8 +350,8 @@ func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertVali // Previously, // 1. Server has non-empty (*tls.Config).Certificates on client hello // 2. 
Server calls (*tls.Config).GetCertificate iff: -// - Server's (*tls.Config).Certificates is not empty, or -// - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName +// - Server's (*tls.Config).Certificates is not empty, or +// - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName // // When (*tls.Config).Certificates is always populated on initial handshake, // client is expected to provide a valid matching SNI to pass the TLS @@ -365,8 +389,17 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) { } } + var minVersion uint16 + if info.MinVersion != 0 { + minVersion = info.MinVersion + } else { + // Default minimum version is TLS 1.2, previous versions are insecure and deprecated. + minVersion = tls.VersionTLS12 + } + cfg := &tls.Config{ - MinVersion: tls.VersionTLS12, + MinVersion: minVersion, + MaxVersion: info.MaxVersion, ServerName: info.ServerName, } @@ -377,19 +410,52 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) { // Client certificates may be verified by either an exact match on the CN, // or a more general check of the CN and SANs. var verifyCertificate func(*x509.Certificate) bool + + if info.AllowedCN != "" && len(info.AllowedCNs) > 0 { + return nil, fmt.Errorf("AllowedCN and AllowedCNs are mutually exclusive (cn=%q, cns=%q)", info.AllowedCN, info.AllowedCNs) + } + if info.AllowedHostname != "" && len(info.AllowedHostnames) > 0 { + return nil, fmt.Errorf("AllowedHostname and AllowedHostnames are mutually exclusive (hostname=%q, hostnames=%q)", info.AllowedHostname, info.AllowedHostnames) + } + if info.AllowedCN != "" && info.AllowedHostname != "" { + return nil, fmt.Errorf("AllowedCN and AllowedHostname are mutually exclusive (cn=%q, hostname=%q)", info.AllowedCN, info.AllowedHostname) + } + if len(info.AllowedCNs) > 0 && len(info.AllowedHostnames) > 0 { + return nil, fmt.Errorf("AllowedCNs and AllowedHostnames are mutually exclusive (cns=%q, hostnames=%q)", info.AllowedCNs, info.AllowedHostnames) + } + if info.AllowedCN != "" { - if info.AllowedHostname != "" { - return nil, fmt.Errorf("AllowedCN and AllowedHostname are mutually exclusive (cn=%q, hostname=%q)", info.AllowedCN, info.AllowedHostname) - } + info.Logger.Warn("AllowedCN is deprecated, use AllowedCNs instead") verifyCertificate = func(cert *x509.Certificate) bool { return info.AllowedCN == cert.Subject.CommonName } } if info.AllowedHostname != "" { + info.Logger.Warn("AllowedHostname is deprecated, use AllowedHostnames instead") verifyCertificate = func(cert *x509.Certificate) bool { return cert.VerifyHostname(info.AllowedHostname) == nil } } + if len(info.AllowedCNs) > 0 { + verifyCertificate = func(cert *x509.Certificate) bool { + for _, allowedCN := range info.AllowedCNs { + if allowedCN == cert.Subject.CommonName { + return true + } + } + return false + } + } + if len(info.AllowedHostnames) > 0 { + verifyCertificate = func(cert *x509.Certificate) bool { + for _, allowedHostname := range info.AllowedHostnames { + if cert.VerifyHostname(allowedHostname) == nil { + return true + } + } + return false + } + } if verifyCertificate != nil { cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { for _, chains := range verifiedChains { @@ -408,23 +474,19 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) { cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (cert *tls.Certificate, err error) { cert, err = tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) if os.IsNotExist(err) { - if info.Logger != nil { - 
info.Logger.Warn( - "failed to find peer cert files", - zap.String("cert-file", info.CertFile), - zap.String("key-file", info.KeyFile), - zap.Error(err), - ) - } + info.Logger.Warn( + "failed to find peer cert files", + zap.String("cert-file", info.CertFile), + zap.String("key-file", info.KeyFile), + zap.Error(err), + ) } else if err != nil { - if info.Logger != nil { - info.Logger.Warn( - "failed to create peer certificate", - zap.String("cert-file", info.CertFile), - zap.String("key-file", info.KeyFile), - zap.Error(err), - ) - } + info.Logger.Warn( + "failed to create peer certificate", + zap.String("cert-file", info.CertFile), + zap.String("key-file", info.KeyFile), + zap.Error(err), + ) } return cert, err } @@ -435,23 +497,19 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) { } cert, err = tlsutil.NewCert(certfile, keyfile, info.parseFunc) if os.IsNotExist(err) { - if info.Logger != nil { - info.Logger.Warn( - "failed to find client cert files", - zap.String("cert-file", certfile), - zap.String("key-file", keyfile), - zap.Error(err), - ) - } + info.Logger.Warn( + "failed to find client cert files", + zap.String("cert-file", certfile), + zap.String("key-file", keyfile), + zap.Error(err), + ) } else if err != nil { - if info.Logger != nil { - info.Logger.Warn( - "failed to create client certificate", - zap.String("cert-file", certfile), - zap.String("key-file", keyfile), - zap.Error(err), - ) - } + info.Logger.Warn( + "failed to create client certificate", + zap.String("cert-file", certfile), + zap.String("key-file", keyfile), + zap.Error(err), + ) } return cert, err } @@ -497,11 +555,6 @@ func (info TLSInfo) ServerConfig() (*tls.Config, error) { // "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP server cfg.NextProtos = []string{"h2"} - // go1.13 enables TLS 1.3 by default - // and in TLS 1.3, cipher suites are not configurable - // setting Max TLS version to TLS 1.2 for go 1.13 - cfg.MaxVersion = tls.VersionTLS12 - return cfg, nil } @@ -556,11 +609,6 @@ func (info TLSInfo) ClientConfig() (*tls.Config, error) { } } - // go1.13 enables TLS 1.3 by default - // and in TLS 1.3, cipher suites are not configurable - // setting Max TLS version to TLS 1.2 for go 1.13 - cfg.MaxVersion = tls.VersionTLS12 - return cfg, nil } diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_opts.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_opts.go index ad4f6904da901..7536f6aff4626 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_opts.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_opts.go @@ -1,3 +1,17 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package transport import ( diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_tls.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_tls.go index 6f1600945cc6f..2c94841625b0b 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_tls.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_tls.go @@ -19,8 +19,8 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" "net" + "os" "strings" "sync" ) @@ -168,16 +168,16 @@ func (l *tlsListener) acceptLoop() { func checkCRL(crlPath string, cert []*x509.Certificate) error { // TODO: cache - crlBytes, err := ioutil.ReadFile(crlPath) + crlBytes, err := os.ReadFile(crlPath) if err != nil { return err } - certList, err := x509.ParseCRL(crlBytes) + certList, err := x509.ParseRevocationList(crlBytes) if err != nil { return err } revokedSerials := make(map[string]struct{}) - for _, rc := range certList.TBSCertList.RevokedCertificates { + for _, rc := range certList.RevokedCertificateEntries { revokedSerials[string(rc.SerialNumber.Bytes())] = struct{}{} } for _, c := range cert { @@ -222,7 +222,8 @@ func checkCertSAN(ctx context.Context, cert *x509.Certificate, remoteAddr string func isHostInDNS(ctx context.Context, host string, dnsNames []string) (ok bool, err error) { // reverse lookup - wildcards, names := []string{}, []string{} + var names []string + var wildcards []string for _, dns := range dnsNames { if strings.HasPrefix(dns, "*.") { wildcards = append(wildcards, dns[1:]) diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt.go index 38548ddd71319..49b48dc876797 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt.go @@ -1,3 +1,17 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package transport import ( @@ -21,12 +35,12 @@ type SocketOpts struct { // in which case lock on data file could result in unexpected // condition. User should take caution to protect against lock race. // [1] https://man7.org/linux/man-pages/man7/socket.7.html - ReusePort bool + ReusePort bool `json:"reuse-port"` // ReuseAddress enables a socket option SO_REUSEADDR which allows // binding to an address in `TIME_WAIT` state. Useful to improve MTTR // in cases where etcd slow to restart due to excessive `TIME_WAIT`. 
// [1] https://man7.org/linux/man-pages/man7/socket.7.html - ReuseAddress bool + ReuseAddress bool `json:"reuse-address"` } func getControls(sopts *SocketOpts) Controls { diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_solaris.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_solaris.go new file mode 100644 index 0000000000000..149ad510240b3 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_solaris.go @@ -0,0 +1,34 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build solaris + +package transport + +import ( + "errors" + "syscall" + + "golang.org/x/sys/unix" +) + +func setReusePort(network, address string, c syscall.RawConn) error { + return errors.New("port reuse is not supported on Solaris") +} + +func setReuseAddress(network, address string, conn syscall.RawConn) error { + return conn.Control(func(fd uintptr) { + syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEADDR, 1) + }) +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go index 432b52e0fcee0..385eadb007449 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go @@ -1,5 +1,18 @@ -//go:build !windows -// +build !windows +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows && !solaris && !wasm && !js package transport diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_wasm.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_wasm.go new file mode 100644 index 0000000000000..c6590b1d46964 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_wasm.go @@ -0,0 +1,30 @@ +// Copyright 2023 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build wasm || js + +package transport + +import ( + "errors" + "syscall" +) + +func setReusePort(network, address string, c syscall.RawConn) error { + return errors.New("port reuse is not supported on WASM") +} + +func setReuseAddress(network, addr string, conn syscall.RawConn) error { + return errors.New("address reuse is not supported on WASM") +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_windows.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_windows.go index 4e5af70b11ebe..2670b4dc7b53e 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_windows.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_windows.go @@ -1,19 +1,32 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + //go:build windows -// +build windows package transport import ( - "fmt" + "errors" "syscall" ) func setReusePort(network, address string, c syscall.RawConn) error { - return fmt.Errorf("port reuse is not supported on Windows") + return errors.New("port reuse is not supported on Windows") } // Windows supports SO_REUSEADDR, but it may cause undefined behavior, as // there is no protection against port hijacking. func setReuseAddress(network, addr string, conn syscall.RawConn) error { - return fmt.Errorf("address reuse is not supported on Windows") + return errors.New("address reuse is not supported on Windows") } diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/tls.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/tls.go index 62fe0d3851957..d5375863fd5d6 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/tls.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/tls.go @@ -15,6 +15,8 @@ package transport import ( + "context" + "errors" "fmt" "strings" "time" @@ -27,6 +29,8 @@ func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) { if err != nil { return nil, err } + defer t.CloseIdleConnections() + var errs []string var endpoints []string for _, ep := range eps { @@ -34,7 +38,7 @@ func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) { errs = append(errs, fmt.Sprintf("%q is insecure", ep)) continue } - conn, cerr := t.Dial("tcp", ep[len("https://"):]) + conn, cerr := t.DialContext(context.Background(), "tcp", ep[len("https://"):]) if cerr != nil { errs = append(errs, fmt.Sprintf("%q failed to dial (%v)", ep, cerr)) continue @@ -43,7 +47,7 @@ func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) { endpoints = append(endpoints, ep) } if len(errs) != 0 { - err = fmt.Errorf("%s", strings.Join(errs, ",")) + err = errors.New(strings.Join(errs, ",")) } return endpoints, err } diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/transport.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/transport.go index 648512772d370..67170d7436d07 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/transport.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/transport.go @@ -30,10 +30,19 @@ func 
NewTransport(info TLSInfo, dialtimeoutd time.Duration) (*http.Transport, er return nil, err } + var ipAddr net.Addr + if info.LocalAddr != "" { + ipAddr, err = net.ResolveTCPAddr("tcp", info.LocalAddr+":0") + if err != nil { + return nil, err + } + } + t := &http.Transport{ Proxy: http.ProxyFromEnvironment, DialContext: (&net.Dialer{ - Timeout: dialtimeoutd, + Timeout: dialtimeoutd, + LocalAddr: ipAddr, // value taken from http.DefaultTransport KeepAlive: 30 * time.Second, }).DialContext, @@ -57,7 +66,7 @@ func NewTransport(info TLSInfo, dialtimeoutd time.Duration) (*http.Transport, er TLSClientConfig: cfg, // Cost of reopening connection on sockets is low, and they are mostly used in testing. // Long living unix-transport connections were leading to 'leak' test flakes. - // Alternativly the returned Transport (t) should override CloseIdleConnections to + // Alternatively the returned Transport (t) should override CloseIdleConnections to // forward it to 'tu' as well. IdleConnTimeout: time.Microsecond, } diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/types/id.go b/vendor/go.etcd.io/etcd/client/pkg/v3/types/id.go index ae00388dde06c..7a09647b5d52f 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/types/id.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/types/id.go @@ -14,7 +14,10 @@ package types -import "strconv" +import ( + "strconv" + "strings" +) // ID represents a generic identifier which is canonically // stored as a uint64 but is typically represented as a @@ -37,3 +40,17 @@ type IDSlice []ID func (p IDSlice) Len() int { return len(p) } func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) } func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p IDSlice) String() string { + var b strings.Builder + if p.Len() > 0 { + b.WriteString(p[0].String()) + } + + for i := 1; i < p.Len(); i++ { + b.WriteString(",") + b.WriteString(p[i].String()) + } + + return b.String() +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/types/set.go b/vendor/go.etcd.io/etcd/client/pkg/v3/types/set.go index e7a3cdc9ab6d6..3e69c8d8b9437 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/types/set.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/types/set.go @@ -90,7 +90,7 @@ func (us *unsafeSet) Length() int { // Values returns the values of the Set in an unspecified order. 
func (us *unsafeSet) Values() (values []string) { - values = make([]string, 0) + values = make([]string, 0, len(us.d)) for val := range us.d { values = append(values, val) } diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/types/urls.go b/vendor/go.etcd.io/etcd/client/pkg/v3/types/urls.go index 9e5d03ff6457b..49a38967e64d1 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/types/urls.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/types/urls.go @@ -36,20 +36,25 @@ func NewURLs(strs []string) (URLs, error) { if err != nil { return nil, err } - if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" { + + switch u.Scheme { + case "http", "https": + if _, _, err := net.SplitHostPort(u.Host); err != nil { + return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in) + } + + if u.Path != "" { + return nil, fmt.Errorf("URL must not contain a path: %s", in) + } + case "unix", "unixs": + break + default: return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in) } - if _, _, err := net.SplitHostPort(u.Host); err != nil { - return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in) - } - if u.Path != "" { - return nil, fmt.Errorf("URL must not contain a path: %s", in) - } all[i] = *u } us := URLs(all) us.Sort() - return us, nil } diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/verify/verify.go b/vendor/go.etcd.io/etcd/client/pkg/v3/verify/verify.go new file mode 100644 index 0000000000000..a7b2097bed9bf --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/verify/verify.go @@ -0,0 +1,80 @@ +// Copyright 2022 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package verify + +import ( + "fmt" + "os" + "strings" +) + +const envVerify = "ETCD_VERIFY" + +type VerificationType string + +const ( + envVerifyValueAll VerificationType = "all" + envVerifyValueAssert VerificationType = "assert" +) + +func getEnvVerify() string { + return strings.ToLower(os.Getenv(envVerify)) +} + +func IsVerificationEnabled(verification VerificationType) bool { + env := getEnvVerify() + return env == string(envVerifyValueAll) || env == strings.ToLower(string(verification)) +} + +// EnableVerifications sets `envVerify` and returns a function that +// can be used to restore the original settings. +func EnableVerifications(verification VerificationType) func() { + previousEnv := getEnvVerify() + os.Setenv(envVerify, string(verification)) + return func() { + os.Setenv(envVerify, previousEnv) + } +} + +// EnableAllVerifications enables verification and returns a function +// that can be used to restore the original settings. +func EnableAllVerifications() func() { + return EnableVerifications(envVerifyValueAll) +} + +// DisableVerifications unsets `envVerify` and returns a function that +// can be used to restore the original settings.
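+//
+// For example (editor's sketch):
+//
+//	revert := DisableVerifications()
+//	defer revert() // re-applies the previous ETCD_VERIFY value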
+func DisableVerifications() func() { + previousEnv := getEnvVerify() + os.Unsetenv(envVerify) + return func() { + os.Setenv(envVerify, previousEnv) + } +} + +// Verify performs verification if the assertions are enabled. +// In the default setup running in tests and skipped in the production code. +func Verify(f func()) { + if IsVerificationEnabled(envVerifyValueAssert) { + f() + } +} + +// Assert will panic with a given formatted message if the given condition is false. +func Assert(condition bool, msg string, v ...any) { + if !condition { + panic(fmt.Sprintf("assertion failed: "+msg, v...)) + } +} diff --git a/vendor/go.etcd.io/etcd/client/v3/OWNERS b/vendor/go.etcd.io/etcd/client/v3/OWNERS new file mode 100644 index 0000000000000..2b7f28b793969 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/v3/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +labels: + - area/clientv3 diff --git a/vendor/go.etcd.io/etcd/client/v3/README.md b/vendor/go.etcd.io/etcd/client/v3/README.md index 1e037d7eb6b5b..af0087ebcc0f1 100644 --- a/vendor/go.etcd.io/etcd/client/v3/README.md +++ b/vendor/go.etcd.io/etcd/client/v3/README.md @@ -1,7 +1,7 @@ -# etcd/clientv3 +# etcd/client/v3 [![Docs](https://img.shields.io/badge/docs-latest-green.svg)](https://etcd.io/docs) -[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/go.etcd.io/etcd/clientv3) +[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/go.etcd.io/etcd/client/v3) `etcd/clientv3` is the official Go etcd client for v3. @@ -11,26 +11,23 @@ go get go.etcd.io/etcd/client/v3 ``` -Warning: As etcd 3.5.0 was not yet released, the command above does not work. -After first pre-release of 3.5.0 [#12498](https://github.com/etcd-io/etcd/issues/12498), -etcd can be referenced using: -``` -go get go.etcd.io/etcd/client/v3@v3.5.0-pre -``` - ## Get started Create client using `clientv3.New`: ```go -cli, err := clientv3.New(clientv3.Config{ - Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, - DialTimeout: 5 * time.Second, -}) -if err != nil { - // handle error! +import clientv3 "go.etcd.io/etcd/client/v3" + +func main() { + cli, err := clientv3.New(clientv3.Config{ + Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, + DialTimeout: 5 * time.Second, + }) + if err != nil { + // handle error! + } + defer cli.Close() } -defer cli.Close() ``` etcd v3 uses [`gRPC`](https://www.grpc.io) for remote procedure calls. And `clientv3` uses diff --git a/vendor/go.etcd.io/etcd/client/v3/auth.go b/vendor/go.etcd.io/etcd/client/v3/auth.go index a6f75d321592e..382172b21bf59 100644 --- a/vendor/go.etcd.io/etcd/client/v3/auth.go +++ b/vendor/go.etcd.io/etcd/client/v3/auth.go @@ -19,9 +19,10 @@ import ( "fmt" "strings" + "google.golang.org/grpc" + "go.etcd.io/etcd/api/v3/authpb" pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "google.golang.org/grpc" ) type ( @@ -134,67 +135,67 @@ func NewAuthFromAuthClient(remote pb.AuthClient, c *Client) Auth { func (auth *authClient) Authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) { resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...) 
- return (*AuthenticateResponse)(resp), toErr(ctx, err) + return (*AuthenticateResponse)(resp), ContextError(ctx, err) } func (auth *authClient) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...) - return (*AuthEnableResponse)(resp), toErr(ctx, err) + return (*AuthEnableResponse)(resp), ContextError(ctx, err) } func (auth *authClient) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...) - return (*AuthDisableResponse)(resp), toErr(ctx, err) + return (*AuthDisableResponse)(resp), ContextError(ctx, err) } func (auth *authClient) AuthStatus(ctx context.Context) (*AuthStatusResponse, error) { resp, err := auth.remote.AuthStatus(ctx, &pb.AuthStatusRequest{}, auth.callOpts...) - return (*AuthStatusResponse)(resp), toErr(ctx, err) + return (*AuthStatusResponse)(resp), ContextError(ctx, err) } func (auth *authClient) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) { resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: &authpb.UserAddOptions{NoPassword: false}}, auth.callOpts...) - return (*AuthUserAddResponse)(resp), toErr(ctx, err) + return (*AuthUserAddResponse)(resp), ContextError(ctx, err) } func (auth *authClient) UserAddWithOptions(ctx context.Context, name string, password string, options *UserAddOptions) (*AuthUserAddResponse, error) { resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: (*authpb.UserAddOptions)(options)}, auth.callOpts...) - return (*AuthUserAddResponse)(resp), toErr(ctx, err) + return (*AuthUserAddResponse)(resp), ContextError(ctx, err) } func (auth *authClient) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) { resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...) - return (*AuthUserDeleteResponse)(resp), toErr(ctx, err) + return (*AuthUserDeleteResponse)(resp), ContextError(ctx, err) } func (auth *authClient) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) { resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...) - return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err) + return (*AuthUserChangePasswordResponse)(resp), ContextError(ctx, err) } func (auth *authClient) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) { resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...) - return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err) + return (*AuthUserGrantRoleResponse)(resp), ContextError(ctx, err) } func (auth *authClient) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) { resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...) - return (*AuthUserGetResponse)(resp), toErr(ctx, err) + return (*AuthUserGetResponse)(resp), ContextError(ctx, err) } func (auth *authClient) UserList(ctx context.Context) (*AuthUserListResponse, error) { resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...) 
- return (*AuthUserListResponse)(resp), toErr(ctx, err) + return (*AuthUserListResponse)(resp), ContextError(ctx, err) } func (auth *authClient) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) { resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...) - return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err) + return (*AuthUserRevokeRoleResponse)(resp), ContextError(ctx, err) } func (auth *authClient) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) { resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...) - return (*AuthRoleAddResponse)(resp), toErr(ctx, err) + return (*AuthRoleAddResponse)(resp), ContextError(ctx, err) } func (auth *authClient) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) { @@ -204,27 +205,27 @@ func (auth *authClient) RoleGrantPermission(ctx context.Context, name string, ke PermType: authpb.Permission_Type(permType), } resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...) - return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err) + return (*AuthRoleGrantPermissionResponse)(resp), ContextError(ctx, err) } func (auth *authClient) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...) - return (*AuthRoleGetResponse)(resp), toErr(ctx, err) + return (*AuthRoleGetResponse)(resp), ContextError(ctx, err) } func (auth *authClient) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...) - return (*AuthRoleListResponse)(resp), toErr(ctx, err) + return (*AuthRoleListResponse)(resp), ContextError(ctx, err) } func (auth *authClient) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) { resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: []byte(key), RangeEnd: []byte(rangeEnd)}, auth.callOpts...) - return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err) + return (*AuthRoleRevokePermissionResponse)(resp), ContextError(ctx, err) } func (auth *authClient) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) { resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...) 
- return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err) + return (*AuthRoleDeleteResponse)(resp), ContextError(ctx, err) } func StrToPermissionType(s string) (PermissionType, error) { diff --git a/vendor/go.etcd.io/etcd/client/v3/client.go b/vendor/go.etcd.io/etcd/client/v3/client.go index 2990379ab9f70..24f5988986d35 100644 --- a/vendor/go.etcd.io/etcd/client/v3/client.go +++ b/vendor/go.etcd.io/etcd/client/v3/client.go @@ -18,22 +18,26 @@ import ( "context" "errors" "fmt" - "strconv" "strings" "sync" "time" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/client/pkg/v3/logutil" - "go.etcd.io/etcd/client/v3/credentials" - "go.etcd.io/etcd/client/v3/internal/endpoint" - "go.etcd.io/etcd/client/v3/internal/resolver" + "github.com/coreos/go-semver/semver" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/codes" grpccredentials "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/status" + + "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" + "go.etcd.io/etcd/api/v3/version" + "go.etcd.io/etcd/client/pkg/v3/logutil" + "go.etcd.io/etcd/client/pkg/v3/verify" + "go.etcd.io/etcd/client/v3/credentials" + "go.etcd.io/etcd/client/v3/internal/endpoint" + "go.etcd.io/etcd/client/v3/internal/resolver" ) var ( @@ -55,7 +59,9 @@ type Client struct { cfg Config creds grpccredentials.TransportCredentials resolver *resolver.EtcdManualResolver - mu *sync.RWMutex + + epMu *sync.RWMutex + endpoints []string ctx context.Context cancel context.CancelFunc @@ -64,7 +70,7 @@ type Client struct { Username string // Password is a password for authentication. Password string - authTokenBundle credentials.Bundle + authTokenBundle credentials.PerRPCCredentialsBundle callOpts []grpc.CallOption @@ -86,7 +92,7 @@ func New(cfg Config) (*Client, error) { // service interface implementations and do not need connection management. func NewCtxClient(ctx context.Context, opts ...Option) *Client { cctx, cancel := context.WithCancel(ctx) - c := &Client{ctx: cctx, cancel: cancel, lgMu: new(sync.RWMutex)} + c := &Client{ctx: cctx, cancel: cancel, lgMu: new(sync.RWMutex), epMu: new(sync.RWMutex)} for _, opt := range opts { opt(c) } @@ -148,7 +154,7 @@ func (c *Client) Close() error { c.Lease.Close() } if c.conn != nil { - return toErr(c.ctx, c.conn.Close()) + return ContextError(c.ctx, c.conn.Close()) } return c.ctx.Err() } @@ -161,18 +167,18 @@ func (c *Client) Ctx() context.Context { return c.ctx } // Endpoints lists the registered endpoints for the client. func (c *Client) Endpoints() []string { // copy the slice; protect original endpoints from being changed - c.mu.RLock() - defer c.mu.RUnlock() - eps := make([]string, len(c.cfg.Endpoints)) - copy(eps, c.cfg.Endpoints) + c.epMu.RLock() + defer c.epMu.RUnlock() + eps := make([]string, len(c.endpoints)) + copy(eps, c.endpoints) return eps } // SetEndpoints updates client's endpoints. func (c *Client) SetEndpoints(eps ...string) { - c.mu.Lock() - defer c.mu.Unlock() - c.cfg.Endpoints = eps + c.epMu.Lock() + defer c.epMu.Unlock() + c.endpoints = eps c.resolver.SetEndpoints(eps) } @@ -189,7 +195,15 @@ func (c *Client) Sync(ctx context.Context) error { eps = append(eps, m.ClientURLs...) } } + // The linearizable `MemberList` returned successfully, so the + // endpoints shouldn't be empty. + verify.Verify(func() { + if len(eps) == 0 { + panic("empty endpoints returned from etcd cluster") + } + }) c.SetEndpoints(eps...) 
+ c.lg.Debug("set etcd endpoints by autoSync", zap.Strings("endpoints", eps)) return nil } @@ -206,7 +220,7 @@ func (c *Client) autoSync() { ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second) err := c.Sync(ctx) cancel() - if err != nil && err != c.ctx.Err() { + if err != nil && !errors.Is(err, c.ctx.Err()) { c.lg.Info("Auto sync endpoints failed.", zap.Error(err)) } } @@ -214,7 +228,9 @@ func (c *Client) autoSync() { } // dialSetupOpts gives the dial opts prior to any authentication. -func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) { +func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) []grpc.DialOption { + var opts []grpc.DialOption + if c.cfg.DialKeepAliveTime > 0 { params := keepalive.ClientParameters{ Time: c.cfg.DialKeepAliveTime, @@ -228,21 +244,36 @@ func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts if creds != nil { opts = append(opts, grpc.WithTransportCredentials(creds)) } else { - opts = append(opts, grpc.WithInsecure()) + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) + } + + unaryMaxRetries := defaultUnaryMaxRetries + if c.cfg.MaxUnaryRetries > 0 { + unaryMaxRetries = c.cfg.MaxUnaryRetries + } + + backoffWaitBetween := defaultBackoffWaitBetween + if c.cfg.BackoffWaitBetween > 0 { + backoffWaitBetween = c.cfg.BackoffWaitBetween + } + + backoffJitterFraction := defaultBackoffJitterFraction + if c.cfg.BackoffJitterFraction > 0 { + backoffJitterFraction = c.cfg.BackoffJitterFraction } // Interceptor retry and backoff. // TODO: Replace all of clientv3/retry.go with RetryPolicy: // https://github.com/grpc/grpc-proto/blob/cdd9ed5c3d3f87aef62f373b93361cf7bddc620d/grpc/service_config/service_config.proto#L130 - rrBackoff := withBackoff(c.roundRobinQuorumBackoff(defaultBackoffWaitBetween, defaultBackoffJitterFraction)) + rrBackoff := withBackoff(c.roundRobinQuorumBackoff(backoffWaitBetween, backoffJitterFraction)) opts = append(opts, // Disable stream retry by default since go-grpc-middleware/retry does not support client streams. // Streams that are safe to retry are enabled individually. grpc.WithStreamInterceptor(c.streamClientInterceptor(withMax(0), rrBackoff)), - grpc.WithUnaryInterceptor(c.unaryClientInterceptor(withMax(defaultUnaryMaxRetries), rrBackoff)), + grpc.WithUnaryInterceptor(c.unaryClientInterceptor(withMax(unaryMaxRetries), rrBackoff)), ) - return opts, nil + return opts } // Dial connects to a single endpoint using the client's config. @@ -263,7 +294,8 @@ func (c *Client) getToken(ctx context.Context) error { resp, err := c.Auth.Authenticate(ctx, c.Username, c.Password) if err != nil { - if err == rpctypes.ErrAuthNotEnabled { + if errors.Is(err, rpctypes.ErrAuthNotEnabled) { + c.authTokenBundle.UpdateAuthToken("") return nil } return err @@ -282,12 +314,9 @@ func (c *Client) dialWithBalancer(dopts ...grpc.DialOption) (*grpc.ClientConn, e // dial configures and dials any grpc balancer target. func (c *Client) dial(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) { - opts, err := c.dialSetupOpts(creds, dopts...) - if err != nil { - return nil, fmt.Errorf("failed to configure dialer: %v", err) - } - if c.Username != "" && c.Password != "" { - c.authTokenBundle = credentials.NewBundle(credentials.Config{}) + opts := c.dialSetupOpts(creds, dopts...) 
+ + if c.authTokenBundle != nil { opts = append(opts, grpc.WithPerRPCCredentials(c.authTokenBundle.PerRPCCredentials())) } @@ -299,7 +328,7 @@ func (c *Client) dial(creds grpccredentials.TransportCredentials, dopts ...grpc. dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout) defer cancel() // TODO: Is this right for cases where grpc.WithBlock() is not set on the dial options? } - target := fmt.Sprintf("%s://%p/%s", resolver.Schema, c, authority(c.Endpoints()[0])) + target := fmt.Sprintf("%s://%p/%s", resolver.Schema, c, authority(c.endpoints[0])) conn, err := grpc.DialContext(dctx, target, opts...) if err != nil { return nil, err @@ -324,15 +353,15 @@ func authority(endpoint string) string { func (c *Client) credentialsForEndpoint(ep string) grpccredentials.TransportCredentials { r := endpoint.RequiresCredentials(ep) switch r { - case endpoint.CREDS_DROP: + case endpoint.CredsDrop: return nil - case endpoint.CREDS_OPTIONAL: + case endpoint.CredsOptional: return c.creds - case endpoint.CREDS_REQUIRE: + case endpoint.CredsRequire: if c.creds != nil { return c.creds } - return credentials.NewBundle(credentials.Config{}).TransportCredentials() + return credentials.NewTransportCredential(nil) default: panic(fmt.Errorf("unsupported CredsRequirement: %v", r)) } @@ -344,7 +373,7 @@ func newClient(cfg *Config) (*Client, error) { } var creds grpccredentials.TransportCredentials if cfg.TLS != nil { - creds = credentials.NewBundle(credentials.Config{TLSConfig: cfg.TLS}).TransportCredentials() + creds = credentials.NewTransportCredential(cfg.TLS) } // use a temporary skeleton client to bootstrap first connection @@ -360,7 +389,7 @@ func newClient(cfg *Config) (*Client, error) { creds: creds, ctx: ctx, cancel: cancel, - mu: new(sync.RWMutex), + epMu: new(sync.RWMutex), callOpts: defaultCallOpts, lgMu: new(sync.RWMutex), } @@ -383,6 +412,7 @@ func newClient(cfg *Config) (*Client, error) { if cfg.Username != "" && cfg.Password != "" { client.Username = cfg.Username client.Password = cfg.Password + client.authTokenBundle = credentials.NewPerRPCCredentialBundle() } if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 { if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize { @@ -406,8 +436,10 @@ func newClient(cfg *Config) (*Client, error) { if len(cfg.Endpoints) < 1 { client.cancel() - return nil, fmt.Errorf("at least one Endpoint is required in client config") + return nil, errors.New("at least one Endpoint is required in client config") } + client.SetEndpoints(cfg.Endpoints...) + // Use a provided endpoint target so that for https:// without any tls config given, then // grpc will assume the certificate server name is the endpoint host. 
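The retry knobs read in `dialSetupOpts` above (`MaxUnaryRetries`, `BackoffWaitBetween`, `BackoffJitterFraction`) are plain `Config` fields, declared in the `config.go` hunk further down. A hedged sketch of a client that overrides them and also refreshes endpoints via `Sync`; the values are illustrative, and `AutoSyncInterval` is the pre-existing field for background endpoint syncing:

```go
package main

import (
	"context"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:             []string{"localhost:2379"},
		DialTimeout:           5 * time.Second,
		AutoSyncInterval:      time.Minute,           // background endpoint refresh via autoSync()
		MaxUnaryRetries:       5,                     // overrides defaultUnaryMaxRetries
		BackoffWaitBetween:    25 * time.Millisecond, // overrides defaultBackoffWaitBetween
		BackoffJitterFraction: 0.10,                  // overrides defaultBackoffJitterFraction
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Endpoints can also be refreshed on demand.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := cli.Sync(ctx); err != nil {
		log.Printf("sync failed: %v", err)
	}
}
```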
conn, err := client.dialWithBalancer() @@ -426,7 +458,7 @@ func newClient(cfg *Config) (*Client, error) { client.Auth = NewAuth(client) client.Maintenance = NewMaintenance(client) - //get token with established connection + // get token with established connection ctx, cancel = client.ctx, func() {} if client.cfg.DialTimeout > 0 { ctx, cancel = context.WithTimeout(ctx, client.cfg.DialTimeout) @@ -435,7 +467,7 @@ func newClient(cfg *Config) (*Client, error) { if err != nil { client.Close() cancel() - //TODO: Consider fmt.Errorf("communicating with [%s] failed: %v", strings.Join(cfg.Endpoints, ";"), err) + // TODO: Consider fmt.Errorf("communicating with [%s] failed: %v", strings.Join(cfg.Endpoints, ";"), err) return nil, err } cancel() @@ -467,6 +499,22 @@ func (c *Client) roundRobinQuorumBackoff(waitBetween time.Duration, jitterFracti } } +// minSupportedVersion returns the minimum version supported, which is the previous minor release. +func minSupportedVersion() *semver.Version { + ver := semver.Must(semver.NewVersion(version.Version)) + // consider only major and minor version + ver = &semver.Version{Major: ver.Major, Minor: ver.Minor} + for i := range version.AllVersions { + if version.AllVersions[i].Equal(*ver) { + if i == 0 { + return ver + } + return &version.AllVersions[i-1] + } + } + panic("current version is not in the version list") +} + func (c *Client) checkVersion() (err error) { var wg sync.WaitGroup @@ -488,20 +536,13 @@ func (c *Client) checkVersion() (err error) { errc <- rerr return } - vs := strings.Split(resp.Version, ".") - maj, min := 0, 0 - if len(vs) >= 2 { - var serr error - if maj, serr = strconv.Atoi(vs[0]); serr != nil { - errc <- serr - return - } - if min, serr = strconv.Atoi(vs[1]); serr != nil { - errc <- serr - return - } + vs, serr := semver.NewVersion(resp.Version) + if serr != nil { + errc <- serr + return } - if maj < 3 || (maj == 3 && min < 2) { + + if vs.LessThan(*minSupportedVersion()) { rerr = ErrOldCluster } errc <- rerr @@ -509,7 +550,7 @@ func (c *Client) checkVersion() (err error) { } // wait for success for range eps { - if err = <-errc; err == nil { + if err = <-errc; err != nil { break } } @@ -557,12 +598,15 @@ func isUnavailableErr(ctx context.Context, err error) bool { return false } -func toErr(ctx context.Context, err error) error { +// ContextError converts the error into an EtcdError if the error message matches one of +// the defined messages; otherwise, it tries to retrieve the context error. +func ContextError(ctx context.Context, err error) error { if err == nil { return nil } err = rpctypes.Error(err) - if _, ok := err.(rpctypes.EtcdError); ok { + var serverErr rpctypes.EtcdError + if errors.As(err, &serverErr) { return err } if ev, ok := status.FromError(err); ok { @@ -584,7 +628,7 @@ func canceledByCaller(stopCtx context.Context, err error) bool { return false } - return err == context.Canceled || err == context.DeadlineExceeded + return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) } // IsConnCanceled returns true, if error is from a closed gRPC connection. 
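With `toErr` renamed to the exported `ContextError`, callers of raw RPC stubs can apply the same normalization the high-level API uses. A sketch of classifying a failure, assuming a connected `*clientv3.Client` named `cli`; note the KV methods already apply `ContextError` internally, so the explicit call below matters mainly for raw stubs:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
	clientv3 "go.etcd.io/etcd/client/v3"
)

func classify(cli *clientv3.Client) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	_, err := cli.Get(ctx, "sample_key")
	// ContextError maps gRPC status errors onto rpctypes errors and
	// falls back to the context error once ctx is done.
	err = clientv3.ContextError(ctx, err)

	var etcdErr rpctypes.EtcdError
	switch {
	case err == nil:
		fmt.Println("ok")
	case errors.As(err, &etcdErr):
		fmt.Println("etcd server error:", etcdErr.Code())
	case errors.Is(err, context.DeadlineExceeded):
		fmt.Println("client deadline exceeded")
	default:
		fmt.Println("transport or context error:", err)
	}
}
```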
@@ -602,7 +646,7 @@ func IsConnCanceled(err error) bool { } // >= gRPC v1.10.x - if err == context.Canceled { + if errors.Is(err, context.Canceled) { return true } diff --git a/vendor/go.etcd.io/etcd/client/v3/cluster.go b/vendor/go.etcd.io/etcd/client/v3/cluster.go index 92d7cdb56b0ff..1b7e83375c30d 100644 --- a/vendor/go.etcd.io/etcd/client/v3/cluster.go +++ b/vendor/go.etcd.io/etcd/client/v3/cluster.go @@ -17,10 +17,10 @@ package clientv3 import ( "context" + "google.golang.org/grpc" + pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/client/pkg/v3/types" - - "google.golang.org/grpc" ) type ( @@ -34,7 +34,7 @@ type ( type Cluster interface { // MemberList lists the current cluster membership. - MemberList(ctx context.Context) (*MemberListResponse, error) + MemberList(ctx context.Context, opts ...OpOption) (*MemberListResponse, error) // MemberAdd adds a new member into the cluster. MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) @@ -93,7 +93,7 @@ func (c *cluster) memberAdd(ctx context.Context, peerAddrs []string, isLearner b } resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } return (*MemberAddResponse)(resp), nil } @@ -102,7 +102,7 @@ func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveRes r := &pb.MemberRemoveRequest{ID: id} resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } return (*MemberRemoveResponse)(resp), nil } @@ -119,23 +119,23 @@ func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []strin if err == nil { return (*MemberUpdateResponse)(resp), nil } - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } -func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { - // it is safe to retry on list. - resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{Linearizable: true}, c.callOpts...) +func (c *cluster) MemberList(ctx context.Context, opts ...OpOption) (*MemberListResponse, error) { + opt := OpGet("", opts...) + resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{Linearizable: !opt.serializable}, c.callOpts...) if err == nil { return (*MemberListResponse)(resp), nil } - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } func (c *cluster) MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) { r := &pb.MemberPromoteRequest{ID: id} resp, err := c.remote.MemberPromote(ctx, r, c.callOpts...) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } return (*MemberPromoteResponse)(resp), nil } diff --git a/vendor/go.etcd.io/etcd/client/v3/compare.go b/vendor/go.etcd.io/etcd/client/v3/compare.go index e2967cf38ed3d..663fdb4d2064e 100644 --- a/vendor/go.etcd.io/etcd/client/v3/compare.go +++ b/vendor/go.etcd.io/etcd/client/v3/compare.go @@ -18,8 +18,10 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" ) -type CompareTarget int -type CompareResult int +type ( + CompareTarget int + CompareResult int +) const ( CompareVersion CompareTarget = iota @@ -30,7 +32,7 @@ const ( type Cmp pb.Compare -func Compare(cmp Cmp, result string, v interface{}) Cmp { +func Compare(cmp Cmp, result string, v any) Cmp { var r pb.Compare_CompareResult switch result { @@ -120,7 +122,7 @@ func (cmp Cmp) WithPrefix() Cmp { } // mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise. 
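The `interface{}` to `any` migration in compare.go does not change the `Compare` call shape; numeric targets are still coerced by `mustInt64`/`mustInt64orLeaseID`. A create-if-absent transaction sketch, assuming a connected client `cli` and illustrative key/value names:

```go
package main

import (
	"context"
	"fmt"
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func createIfAbsent(cli *clientv3.Client) {
	// Succeeds only if "k" has never been modified (version 0);
	// the int literal 0 is coerced by mustInt64.
	resp, err := cli.Txn(context.Background()).
		If(clientv3.Compare(clientv3.Version("k"), "=", 0)).
		Then(clientv3.OpPut("k", "v")).
		Else(clientv3.OpGet("k")).
		Commit()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created:", resp.Succeeded)
}
```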
-func mustInt64(val interface{}) int64 { +func mustInt64(val any) int64 { if v, ok := val.(int64); ok { return v } @@ -132,7 +134,7 @@ func mustInt64(val interface{}) int64 { // mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an // int64 otherwise. -func mustInt64orLeaseID(val interface{}) int64 { +func mustInt64orLeaseID(val any) int64 { if v, ok := val.(LeaseID); ok { return int64(v) } diff --git a/vendor/go.etcd.io/etcd/client/v3/config.go b/vendor/go.etcd.io/etcd/client/v3/config.go index 335a288732b50..8351828d2f90c 100644 --- a/vendor/go.etcd.io/etcd/client/v3/config.go +++ b/vendor/go.etcd.io/etcd/client/v3/config.go @@ -21,6 +21,8 @@ import ( "go.uber.org/zap" "google.golang.org/grpc" + + "go.etcd.io/etcd/client/pkg/v3/transport" ) type Config struct { @@ -52,7 +54,7 @@ type Config struct { // If 0, it defaults to "math.MaxInt32", because range response can // easily exceed request send limits. // Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit. - // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes"). + // ("--max-recv-bytes" flag to etcd). MaxCallRecvMsgSize int // TLS holds the client secure credentials, if any. @@ -88,5 +90,139 @@ type Config struct { // PermitWithoutStream when set will allow client to send keepalive pings to server without any active streams(RPCs). PermitWithoutStream bool `json:"permit-without-stream"` + // MaxUnaryRetries is the maximum number of retries for unary RPCs. + MaxUnaryRetries uint `json:"max-unary-retries"` + + // BackoffWaitBetween is the wait time before retrying an RPC. + BackoffWaitBetween time.Duration `json:"backoff-wait-between"` + + // BackoffJitterFraction is the jitter fraction to randomize backoff wait time. + BackoffJitterFraction float64 `json:"backoff-jitter-fraction"` + // TODO: support custom balancer picker } + +// ConfigSpec is the configuration from users, which comes from command-line flags, +// environment variables or config file. It is a fully declarative configuration, +// and can be serialized & deserialized to/from JSON. +type ConfigSpec struct { + Endpoints []string `json:"endpoints"` + RequestTimeout time.Duration `json:"request-timeout"` + DialTimeout time.Duration `json:"dial-timeout"` + KeepAliveTime time.Duration `json:"keepalive-time"` + KeepAliveTimeout time.Duration `json:"keepalive-timeout"` + MaxCallSendMsgSize int `json:"max-request-bytes"` + MaxCallRecvMsgSize int `json:"max-recv-bytes"` + Secure *SecureConfig `json:"secure"` + Auth *AuthConfig `json:"auth"` +} + +type SecureConfig struct { + Cert string `json:"cert"` + Key string `json:"key"` + Cacert string `json:"cacert"` + ServerName string `json:"server-name"` + + InsecureTransport bool `json:"insecure-transport"` + InsecureSkipVerify bool `json:"insecure-skip-tls-verify"` +} + +type AuthConfig struct { + Username string `json:"username"` + Password string `json:"password"` +} + +func (cs *ConfigSpec) Clone() *ConfigSpec { + if cs == nil { + return nil + } + + clone := *cs + + if len(cs.Endpoints) > 0 { + clone.Endpoints = make([]string, len(cs.Endpoints)) + copy(clone.Endpoints, cs.Endpoints) + } + + if cs.Secure != nil { + clone.Secure = &SecureConfig{} + *clone.Secure = *cs.Secure + } + if cs.Auth != nil { + clone.Auth = &AuthConfig{} + *clone.Auth = *cs.Auth + } + + return &clone +} + +func (cfg AuthConfig) Empty() bool { + return cfg.Username == "" && cfg.Password == "" +} + +// NewClientConfig creates a Config based on the provided ConfigSpec. 
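`ConfigSpec` plus `NewClientConfig` (defined next) give a declarative, JSON-round-trippable way to build a client, with `newTLSConfig` lowering the `Secure` block to a `*tls.Config`. A sketch with illustrative endpoints, file paths, and credentials:

```go
package main

import (
	"log"

	"go.uber.org/zap"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	spec := &clientv3.ConfigSpec{
		Endpoints: []string{"https://etcd-1.example:2379"}, // illustrative endpoint
		Secure: &clientv3.SecureConfig{
			Cert:   "/etc/etcd/client.crt", // illustrative paths
			Key:    "/etc/etcd/client.key",
			Cacert: "/etc/etcd/ca.crt",
		},
		Auth: &clientv3.AuthConfig{Username: "root", Password: "example"},
	}

	// NewClientConfig builds the TLS config from the Secure block
	// and copies the Auth credentials into the resulting Config.
	cfg, err := clientv3.NewClientConfig(spec, zap.NewNop())
	if err != nil {
		log.Fatal(err)
	}

	cli, err := clientv3.New(*cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
}
```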
+func NewClientConfig(confSpec *ConfigSpec, lg *zap.Logger) (*Config, error) { + tlsCfg, err := newTLSConfig(confSpec.Secure, lg) + if err != nil { + return nil, err + } + + cfg := &Config{ + Endpoints: confSpec.Endpoints, + DialTimeout: confSpec.DialTimeout, + DialKeepAliveTime: confSpec.KeepAliveTime, + DialKeepAliveTimeout: confSpec.KeepAliveTimeout, + MaxCallSendMsgSize: confSpec.MaxCallSendMsgSize, + MaxCallRecvMsgSize: confSpec.MaxCallRecvMsgSize, + TLS: tlsCfg, + } + + if confSpec.Auth != nil { + cfg.Username = confSpec.Auth.Username + cfg.Password = confSpec.Auth.Password + } + + return cfg, nil +} + +func newTLSConfig(scfg *SecureConfig, lg *zap.Logger) (*tls.Config, error) { + var ( + tlsCfg *tls.Config + err error + ) + + if scfg == nil { + return nil, nil + } + + if scfg.Cert != "" || scfg.Key != "" || scfg.Cacert != "" || scfg.ServerName != "" { + cfgtls := &transport.TLSInfo{ + CertFile: scfg.Cert, + KeyFile: scfg.Key, + TrustedCAFile: scfg.Cacert, + ServerName: scfg.ServerName, + Logger: lg, + } + if tlsCfg, err = cfgtls.ClientConfig(); err != nil { + return nil, err + } + } + + // If key/cert is not given but user wants secure connection, we + // should still setup an empty tls configuration for gRPC to setup + // secure connection. + if tlsCfg == nil && !scfg.InsecureTransport { + tlsCfg = &tls.Config{} + } + + // If the user wants to skip TLS verification then we should set + // the InsecureSkipVerify flag in tls configuration. + if scfg.InsecureSkipVerify { + if tlsCfg == nil { + tlsCfg = &tls.Config{} + } + tlsCfg.InsecureSkipVerify = scfg.InsecureSkipVerify + } + + return tlsCfg, nil +} diff --git a/vendor/go.etcd.io/etcd/client/v3/credentials/credentials.go b/vendor/go.etcd.io/etcd/client/v3/credentials/credentials.go index 42f688eb359c4..a2d8b45e86ab6 100644 --- a/vendor/go.etcd.io/etcd/client/v3/credentials/credentials.go +++ b/vendor/go.etcd.io/etcd/client/v3/credentials/credentials.go @@ -19,92 +19,51 @@ package credentials import ( "context" "crypto/tls" - "net" "sync" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" grpccredentials "google.golang.org/grpc/credentials" + + "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" ) -// Config defines gRPC credential configuration. -type Config struct { - TLSConfig *tls.Config +func NewTransportCredential(cfg *tls.Config) grpccredentials.TransportCredentials { + return grpccredentials.NewTLS(cfg) } -// Bundle defines gRPC credential interface. -type Bundle interface { - grpccredentials.Bundle +// PerRPCCredentialsBundle defines gRPC credential interface. +type PerRPCCredentialsBundle interface { UpdateAuthToken(token string) + PerRPCCredentials() grpccredentials.PerRPCCredentials } -// NewBundle constructs a new gRPC credential bundle. -func NewBundle(cfg Config) Bundle { - return &bundle{ - tc: newTransportCredential(cfg.TLSConfig), - rc: newPerRPCCredential(), +func NewPerRPCCredentialBundle() PerRPCCredentialsBundle { + return &perRPCCredentialBundle{ + rc: &perRPCCredential{}, } } -// bundle implements "grpccredentials.Bundle" interface. -type bundle struct { - tc *transportCredential +// perRPCCredentialBundle implements `PerRPCCredentialsBundle` interface. 
+type perRPCCredentialBundle struct { rc *perRPCCredential } -func (b *bundle) TransportCredentials() grpccredentials.TransportCredentials { - return b.tc -} - -func (b *bundle) PerRPCCredentials() grpccredentials.PerRPCCredentials { - return b.rc -} - -func (b *bundle) NewWithMode(mode string) (grpccredentials.Bundle, error) { - // no-op - return nil, nil -} - -// transportCredential implements "grpccredentials.TransportCredentials" interface. -type transportCredential struct { - gtc grpccredentials.TransportCredentials -} - -func newTransportCredential(cfg *tls.Config) *transportCredential { - return &transportCredential{ - gtc: grpccredentials.NewTLS(cfg), - } -} - -func (tc *transportCredential) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) { - return tc.gtc.ClientHandshake(ctx, authority, rawConn) -} - -func (tc *transportCredential) ServerHandshake(rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) { - return tc.gtc.ServerHandshake(rawConn) -} - -func (tc *transportCredential) Info() grpccredentials.ProtocolInfo { - return tc.gtc.Info() -} - -func (tc *transportCredential) Clone() grpccredentials.TransportCredentials { - return &transportCredential{ - gtc: tc.gtc.Clone(), +func (b *perRPCCredentialBundle) UpdateAuthToken(token string) { + if b.rc == nil { + return } + b.rc.UpdateAuthToken(token) } -func (tc *transportCredential) OverrideServerName(serverNameOverride string) error { - return tc.gtc.OverrideServerName(serverNameOverride) +func (b *perRPCCredentialBundle) PerRPCCredentials() grpccredentials.PerRPCCredentials { + return b.rc } -// perRPCCredential implements "grpccredentials.PerRPCCredentials" interface. +// perRPCCredential implements `grpccredentials.PerRPCCredentials` interface. 
 type perRPCCredential struct {
 	authToken   string
 	authTokenMu sync.RWMutex
 }
 
-func newPerRPCCredential() *perRPCCredential { return &perRPCCredential{} }
-
 func (rc *perRPCCredential) RequireTransportSecurity() bool { return false }
 
 func (rc *perRPCCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
@@ -117,13 +76,6 @@ func (rc *perRPCCredential) GetRequestMetadata(ctx context.Context, s ...string)
 	return map[string]string{rpctypes.TokenFieldNameGRPC: authToken}, nil
 }
 
-func (b *bundle) UpdateAuthToken(token string) {
-	if b.rc == nil {
-		return
-	}
-	b.rc.UpdateAuthToken(token)
-}
-
 func (rc *perRPCCredential) UpdateAuthToken(token string) {
 	rc.authTokenMu.Lock()
 	rc.authToken = token
diff --git a/vendor/go.etcd.io/etcd/client/v3/ctx.go b/vendor/go.etcd.io/etcd/client/v3/ctx.go
index 56b69cf2ede8a..38cee6c27e4a0 100644
--- a/vendor/go.etcd.io/etcd/client/v3/ctx.go
+++ b/vendor/go.etcd.io/etcd/client/v3/ctx.go
@@ -17,9 +17,10 @@ package clientv3
 import (
 	"context"
 
+	"google.golang.org/grpc/metadata"
+
 	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
 	"go.etcd.io/etcd/api/v3/version"
-	"google.golang.org/grpc/metadata"
 )
 
 // WithRequireLeader requires client requests to only succeed
diff --git a/vendor/go.etcd.io/etcd/client/v3/doc.go b/vendor/go.etcd.io/etcd/client/v3/doc.go
index 645d744a5a7f2..bd820d3d79efd 100644
--- a/vendor/go.etcd.io/etcd/client/v3/doc.go
+++ b/vendor/go.etcd.io/etcd/client/v3/doc.go
@@ -47,8 +47,8 @@
 // To specify a client request timeout, wrap the context with context.WithTimeout:
 //
 //	ctx, cancel := context.WithTimeout(context.Background(), timeout)
+//	defer cancel()
 //	resp, err := kvc.Put(ctx, "sample_key", "sample_value")
-//	cancel()
 //	if err != nil {
 //	    // handle error!
 //	}
@@ -61,7 +61,7 @@
 //
 //	1. context error: canceled or deadline exceeded.
 //	2. gRPC error: e.g. when clock drifts in server-side before client's context deadline exceeded.
-//	See https://github.com/etcd-io/etcd/blob/main/api/v3rpc/rpctypes/error.go
+//	See https://github.com/etcd-io/etcd/blob/main/api/v3rpc/rpctypes/error.go
 //
 //	Here is the example code to handle client errors:
 //
@@ -102,5 +102,4 @@
 // The grpc load balancer is registered statically and is shared across etcd clients.
 // To enable detailed load balancer logging, set the ETCD_CLIENT_DEBUG environment
 // variable. E.g. "ETCD_CLIENT_DEBUG=1".
-//
 package clientv3
diff --git a/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go b/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go
index 1d3f1a7a2c7f8..2c45b5e3067bd 100644
--- a/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go
+++ b/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go
@@ -25,12 +25,12 @@ import (
 type CredsRequirement int
 
 const (
-	// CREDS_REQUIRE - Credentials/certificate required for thi type of connection.
-	CREDS_REQUIRE CredsRequirement = iota
-	// CREDS_DROP - Credentials/certificate not needed and should get ignored.
-	CREDS_DROP
-	// CREDS_OPTIONAL - Credentials/certificate might be used if supplied
-	CREDS_OPTIONAL
+	// CredsRequire - Credentials/certificate required for this type of connection.
+	CredsRequire CredsRequirement = iota
+	// CredsDrop - Credentials/certificate not needed and should get ignored.
+ CredsDrop + // CredsOptional - Credentials/certificate might be used if supplied + CredsOptional ) func extractHostFromHostPort(ep string) string { @@ -41,12 +41,8 @@ func extractHostFromHostPort(ep string) string { return host } -func extractHostFromPath(pathStr string) string { - return extractHostFromHostPort(path.Base(pathStr)) -} - -//mustSplit2 returns the values from strings.SplitN(s, sep, 2). -//If sep is not found, it returns ("", "", false) instead. +// mustSplit2 returns the values from strings.SplitN(s, sep, 2). +// If sep is not found, it returns ("", "", false) instead. func mustSplit2(s, sep string) (string, string) { spl := strings.SplitN(s, sep, 2) if len(spl) < 2 { @@ -58,20 +54,20 @@ func mustSplit2(s, sep string) (string, string) { func schemeToCredsRequirement(schema string) CredsRequirement { switch schema { case "https", "unixs": - return CREDS_REQUIRE + return CredsRequire case "http": - return CREDS_DROP + return CredsDrop case "unix": // Preserving previous behavior from: // https://github.com/etcd-io/etcd/blob/dae29bb719dd69dc119146fc297a0628fcc1ccf8/client/v3/client.go#L212 // that likely was a bug due to missing 'fallthrough'. // At the same time it seems legit to let the users decide whether they // want credential control or not (and 'unixs' schema is not a standard thing). - return CREDS_OPTIONAL + return CredsOptional case "": - return CREDS_OPTIONAL + return CredsOptional default: - return CREDS_OPTIONAL + return CredsOptional } } @@ -81,11 +77,12 @@ func schemeToCredsRequirement(schema string) CredsRequirement { // The main differences: // - etcd supports unixs & https names as opposed to unix & http to // distinguish need to configure certificates. -// - etcd support http(s) names as opposed to tcp supported by grpc/dial method. -// - etcd supports unix(s)://local-file naming schema +// - etcd support http(s) names as opposed to tcp supported by grpc/dial method. +// - etcd supports unix(s)://local-file naming schema // (as opposed to unix:local-file canonical name used by grpc for current dir files). -// - Within the unix(s) schemas, the last segment (filename) without 'port' (content after colon) -// is considered serverName - to allow local testing of cert-protected communication. +// - Within the unix(s) schemas, the last segment (filename) without 'port' (content after colon) +// is considered serverName - to allow local testing of cert-protected communication. 
+// // See more: // - https://github.com/grpc/grpc-go/blob/26c143bd5f59344a4b8a1e491e0f5e18aa97abc7/internal/grpcutil/target.go#L47 // - https://golang.org/pkg/net/#Dial @@ -95,29 +92,29 @@ func translateEndpoint(ep string) (addr string, serverName string, requireCreds if strings.HasPrefix(ep, "unix:///") || strings.HasPrefix(ep, "unixs:///") { // absolute path case schema, absolutePath := mustSplit2(ep, "://") - return "unix://" + absolutePath, extractHostFromPath(absolutePath), schemeToCredsRequirement(schema) + return "unix://" + absolutePath, path.Base(absolutePath), schemeToCredsRequirement(schema) } if strings.HasPrefix(ep, "unix://") || strings.HasPrefix(ep, "unixs://") { // legacy etcd local path schema, localPath := mustSplit2(ep, "://") - return "unix:" + localPath, extractHostFromPath(localPath), schemeToCredsRequirement(schema) + return "unix:" + localPath, path.Base(localPath), schemeToCredsRequirement(schema) } schema, localPath := mustSplit2(ep, ":") - return "unix:" + localPath, extractHostFromPath(localPath), schemeToCredsRequirement(schema) + return "unix:" + localPath, path.Base(localPath), schemeToCredsRequirement(schema) } if strings.Contains(ep, "://") { url, err := url.Parse(ep) if err != nil { - return ep, extractHostFromHostPort(ep), CREDS_OPTIONAL + return ep, ep, CredsOptional } if url.Scheme == "http" || url.Scheme == "https" { - return url.Host, url.Hostname(), schemeToCredsRequirement(url.Scheme) + return url.Host, url.Host, schemeToCredsRequirement(url.Scheme) } - return ep, url.Hostname(), schemeToCredsRequirement(url.Scheme) + return ep, url.Host, schemeToCredsRequirement(url.Scheme) } // Handles plain addresses like 10.0.0.44:437. - return ep, extractHostFromHostPort(ep), CREDS_OPTIONAL + return ep, ep, CredsOptional } // RequiresCredentials returns whether given endpoint requires diff --git a/vendor/go.etcd.io/etcd/client/v3/internal/resolver/resolver.go b/vendor/go.etcd.io/etcd/client/v3/internal/resolver/resolver.go index 3ee3cb8e2bb9c..403b745cb723a 100644 --- a/vendor/go.etcd.io/etcd/client/v3/internal/resolver/resolver.go +++ b/vendor/go.etcd.io/etcd/client/v3/internal/resolver/resolver.go @@ -15,10 +15,11 @@ package resolver import ( - "go.etcd.io/etcd/client/v3/internal/endpoint" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/serviceconfig" + + "go.etcd.io/etcd/client/v3/internal/endpoint" ) const ( @@ -60,13 +61,15 @@ func (r *EtcdManualResolver) SetEndpoints(endpoints []string) { func (r EtcdManualResolver) updateState() { if r.CC != nil { - addresses := make([]resolver.Address, len(r.endpoints)) + eps := make([]resolver.Endpoint, len(r.endpoints)) for i, ep := range r.endpoints { addr, serverName := endpoint.Interpret(ep) - addresses[i] = resolver.Address{Addr: addr, ServerName: serverName} + eps[i] = resolver.Endpoint{Addresses: []resolver.Address{ + {Addr: addr, ServerName: serverName}, + }} } state := resolver.State{ - Addresses: addresses, + Endpoints: eps, ServiceConfig: r.serviceConfig, } r.UpdateState(state) diff --git a/vendor/go.etcd.io/etcd/client/v3/kv.go b/vendor/go.etcd.io/etcd/client/v3/kv.go index 5e9fb7d45896b..8d0c595d1e43d 100644 --- a/vendor/go.etcd.io/etcd/client/v3/kv.go +++ b/vendor/go.etcd.io/etcd/client/v3/kv.go @@ -17,9 +17,10 @@ package clientv3 import ( "context" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "google.golang.org/grpc" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" ) type ( @@ -79,12 +80,15 @@ func (op OpResponse) 
Txn() *TxnResponse { return op.txn } func (resp *PutResponse) OpResponse() OpResponse { return OpResponse{put: resp} } + func (resp *GetResponse) OpResponse() OpResponse { return OpResponse{get: resp} } + func (resp *DeleteResponse) OpResponse() OpResponse { return OpResponse{del: resp} } + func (resp *TxnResponse) OpResponse() OpResponse { return OpResponse{txn: resp} } @@ -112,23 +116,23 @@ func NewKVFromKVClient(remote pb.KVClient, c *Client) KV { func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) { r, err := kv.Do(ctx, OpPut(key, val, opts...)) - return r.put, toErr(ctx, err) + return r.put, ContextError(ctx, err) } func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) { r, err := kv.Do(ctx, OpGet(key, opts...)) - return r.get, toErr(ctx, err) + return r.get, ContextError(ctx, err) } func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) { r, err := kv.Do(ctx, OpDelete(key, opts...)) - return r.del, toErr(ctx, err) + return r.del, ContextError(ctx, err) } func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) { resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } return (*CompactResponse)(resp), err } @@ -145,10 +149,14 @@ func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { var err error switch op.t { case tRange: - var resp *pb.RangeResponse - resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...) - if err == nil { - return OpResponse{get: (*GetResponse)(resp)}, nil + if op.IsSortOptionValid() { + var resp *pb.RangeResponse + resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...) 
+ if err == nil { + return OpResponse{get: (*GetResponse)(resp)}, nil + } + } else { + err = rpctypes.ErrInvalidSortOption } case tPut: var resp *pb.PutResponse @@ -173,5 +181,5 @@ func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { default: panic("Unknown op") } - return OpResponse{}, toErr(ctx, err) + return OpResponse{}, ContextError(ctx, err) } diff --git a/vendor/go.etcd.io/etcd/client/v3/lease.go b/vendor/go.etcd.io/etcd/client/v3/lease.go index bd31e6b4a5b45..11b5834828633 100644 --- a/vendor/go.etcd.io/etcd/client/v3/lease.go +++ b/vendor/go.etcd.io/etcd/client/v3/lease.go @@ -16,15 +16,16 @@ package clientv3 import ( "context" + "errors" "sync" "time" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/metadata" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" ) type ( @@ -198,12 +199,12 @@ func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout keepAlives: make(map[LeaseID]*keepAlive), remote: remote, firstKeepAliveTimeout: keepAliveTimeout, - lg: c.lg, } if l.firstKeepAliveTimeout == time.Second { l.firstKeepAliveTimeout = defaultTTL } if c != nil { + l.lg = c.lg l.callOpts = c.callOpts } reqLeaderCtx := WithRequireLeader(context.Background()) @@ -223,7 +224,7 @@ func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, err } return gresp, nil } - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) { @@ -232,14 +233,14 @@ func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, if err == nil { return (*LeaseRevokeResponse)(resp), nil } - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) { r := toLeaseTimeToLiveRequest(id, opts...) resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } gresp := &LeaseTimeToLiveResponse{ ResponseHeader: resp.GetHeader(), @@ -260,9 +261,15 @@ func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) { } return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil } - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } +// To identify the context passed to `KeepAlive`, a key/value pair is +// attached to the context. The key is a `keepAliveCtxKey` object, and +// the value is the pointer to the context object itself, ensuring +// uniqueness as each context has a unique memory address. 
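The context-key scheme described above lets `keepAliveCtxCloser` match a registered context by identity rather than by comparing context values, so callers see no API change. A typical keep-alive loop, assuming a connected client `cli`:

```go
package main

import (
	"context"
	"fmt"
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func keepLeaseAlive(cli *clientv3.Client) {
	lease, err := cli.Grant(context.Background(), 10) // 10s TTL
	if err != nil {
		log.Fatal(err)
	}

	// A cancellable context registers a closer goroutine; a context
	// whose Done() channel is nil (e.g. context.Background()) now skips it.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	ch, err := cli.KeepAlive(ctx, lease.ID)
	if err != nil {
		log.Fatal(err)
	}
	// The channel closes when the lease expires or the context is cancelled.
	for ka := range ch {
		fmt.Println("lease renewed, remaining TTL:", ka.TTL)
	}
}
```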
+type keepAliveCtxKey struct{} + func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) { ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize) @@ -277,6 +284,10 @@ func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAl default: } ka, ok := l.keepAlives[id] + + if ctx.Done() != nil { + ctx = context.WithValue(ctx, keepAliveCtxKey{}, &ctx) + } if !ok { // create fresh keep alive ka = &keepAlive{ @@ -294,7 +305,9 @@ func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAl } l.mu.Unlock() - go l.keepAliveCtxCloser(ctx, id, ka.donec) + if ctx.Done() != nil { + go l.keepAliveCtxCloser(ctx, id, ka.donec) + } l.firstKeepAliveOnce.Do(func() { go l.recvKeepAliveLoop() go l.deadlineLoop() @@ -313,7 +326,7 @@ func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive return resp, err } if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } } } @@ -345,7 +358,7 @@ func (l *lessor) keepAliveCtxCloser(ctx context.Context, id LeaseID, donec <-cha // close channel and remove context if still associated with keep alive for i, c := range ka.ctxs { - if c == ctx { + if c.Value(keepAliveCtxKey{}) == ctx.Value(keepAliveCtxKey{}) { close(ka.chs[i]) ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...) ka.chs = append(ka.chs[:i], ka.chs[i+1:]...) @@ -397,26 +410,35 @@ func (l *lessor) closeRequireLeader() { } } -func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { +func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (karesp *LeaseKeepAliveResponse, ferr error) { cctx, cancel := context.WithCancel(ctx) defer cancel() stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...) 
if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } + defer func() { + if cerr := stream.CloseSend(); cerr != nil { + if ferr == nil { + ferr = ContextError(ctx, cerr) + } + return + } + }() + err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)}) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } resp, rerr := stream.Recv() if rerr != nil { - return nil, toErr(ctx, rerr) + return nil, ContextError(ctx, rerr) } - karesp := &LeaseKeepAliveResponse{ + karesp = &LeaseKeepAliveResponse{ ResponseHeader: resp.GetHeader(), ID: LeaseID(resp.ID), TTL: resp.TTL, @@ -439,6 +461,9 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) { for { stream, err := l.resetRecv() if err != nil { + l.lg.Warn("error occurred during lease keep alive loop", + zap.Error(err), + ) if canceledByCaller(l.stopCtx, err) { return err } @@ -450,7 +475,7 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) { return err } - if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader { + if errors.Is(ContextError(l.stopCtx, err), rpctypes.ErrNoLeader) { l.closeRequireLeader() } break @@ -535,9 +560,12 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) { // deadlineLoop reaps any keep alive channels that have not received a response // within the lease TTL func (l *lessor) deadlineLoop() { + timer := time.NewTimer(time.Second) + defer timer.Stop() for { + timer.Reset(time.Second) select { - case <-time.After(time.Second): + case <-timer.C: case <-l.donec: return } @@ -571,7 +599,9 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { for _, id := range tosend { r := &pb.LeaseKeepAliveRequest{ID: int64(id)} if err := stream.Send(r); err != nil { - // TODO do something with this error? + l.lg.Warn("error occurred during lease keep alive request sending", + zap.Error(err), + ) return } } diff --git a/vendor/go.etcd.io/etcd/client/v3/logger.go b/vendor/go.etcd.io/etcd/client/v3/logger.go index ecac42730f619..300363cd25bab 100644 --- a/vendor/go.etcd.io/etcd/client/v3/logger.go +++ b/vendor/go.etcd.io/etcd/client/v3/logger.go @@ -18,10 +18,11 @@ import ( "log" "os" - "go.etcd.io/etcd/client/pkg/v3/logutil" "go.uber.org/zap/zapcore" "go.uber.org/zap/zapgrpc" "google.golang.org/grpc/grpclog" + + "go.etcd.io/etcd/client/pkg/v3/logutil" ) func init() { @@ -51,8 +52,8 @@ func etcdClientDebugLevel() zapcore.Level { return zapcore.InfoLevel } var l zapcore.Level - if err := l.Set(envLevel); err == nil { - log.Printf("Deprecated env ETCD_CLIENT_DEBUG value. Using default level: 'info'") + if err := l.Set(envLevel); err != nil { + log.Print("Invalid value for environment variable 'ETCD_CLIENT_DEBUG'. 
Using default level: 'info'")
 		return zapcore.InfoLevel
 	}
 	return l
diff --git a/vendor/go.etcd.io/etcd/client/v3/maintenance.go b/vendor/go.etcd.io/etcd/client/v3/maintenance.go
index dbea530e66a24..00aaacd15fdc2 100644
--- a/vendor/go.etcd.io/etcd/client/v3/maintenance.go
+++ b/vendor/go.etcd.io/etcd/client/v3/maintenance.go
@@ -16,12 +16,14 @@ package clientv3
 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 
-	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
 	"go.uber.org/zap"
 	"google.golang.org/grpc"
+
+	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
 )
 
 type (
@@ -31,6 +33,15 @@ type (
 	StatusResponse     pb.StatusResponse
 	HashKVResponse     pb.HashKVResponse
 	MoveLeaderResponse pb.MoveLeaderResponse
+	DowngradeResponse  pb.DowngradeResponse
+
+	DowngradeAction pb.DowngradeRequest_DowngradeAction
+)
+
+const (
+	DowngradeValidate = DowngradeAction(pb.DowngradeRequest_VALIDATE)
+	DowngradeEnable   = DowngradeAction(pb.DowngradeRequest_ENABLE)
+	DowngradeCancel   = DowngradeAction(pb.DowngradeRequest_CANCEL)
 )
 
 type Maintenance interface {
@@ -57,14 +68,40 @@ type Maintenance interface {
 	// is non-zero, the hash is computed on all keys at or below the given revision.
 	HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error)
 
+	// SnapshotWithVersion returns a reader for a point-in-time snapshot and the version of etcd that created it.
+	// If the context "ctx" is canceled or timed out, reading from the returned
+	// "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded).
+	SnapshotWithVersion(ctx context.Context) (*SnapshotResponse, error)
+
 	// Snapshot provides a reader for a point-in-time snapshot of etcd.
 	// If the context "ctx" is canceled or timed out, reading from returned
 	// "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded).
+	// Deprecated: use SnapshotWithVersion instead.
 	Snapshot(ctx context.Context) (io.ReadCloser, error)
 
 	// MoveLeader requests current leader to transfer its leadership to the transferee.
 	// Request must be made to the leader.
 	MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error)
+
+	// Downgrade requests a downgrade, verifies its feasibility, or cancels a downgrade
+	// of the cluster version.
+	// Supported since etcd 3.5.
+	Downgrade(ctx context.Context, action DowngradeAction, version string) (*DowngradeResponse, error)
+}
+
+// SnapshotResponse is the aggregated response from the snapshot stream.
+// The consumer is responsible for closing the stream by calling .Snapshot.Close()
+type SnapshotResponse struct {
+	// Header is the first header in the snapshot stream; it carries the current key-value store information
+	// and indicates the point in time of the snapshot.
+	Header *pb.ResponseHeader
+	// Snapshot exposes the io.ReadCloser interface for data stored in the Blob field in the snapshot stream.
+	Snapshot io.ReadCloser
+	// Version is the local version of the server that created the snapshot.
+	// In a cluster with binaries of different versions, each member can return a different result.
+	// It informs which etcd server version should be used when restoring the snapshot.
+	// Supported on etcd >= v3.6.
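`SnapshotWithVersion` surfaces the first stream header and the creating server's version (the `Version` field, completed just below) alongside the snapshot body. A download sketch, assuming a connected client `cli` and an illustrative output path:

```go
package main

import (
	"context"
	"io"
	"log"
	"os"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func saveSnapshot(cli *clientv3.Client) {
	sr, err := cli.SnapshotWithVersion(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	defer sr.Snapshot.Close() // the caller owns the stream

	f, err := os.Create("backup.db") // illustrative output path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if _, err := io.Copy(f, sr.Snapshot); err != nil {
		log.Fatal(err)
	}
	// Version is empty when the server predates v3.6.
	log.Printf("snapshot created by etcd %q at revision %d", sr.Version, sr.Header.GetRevision())
}
```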
+ Version string } type maintenance struct { @@ -80,21 +117,10 @@ func NewMaintenance(c *Client) Maintenance { dial: func(endpoint string) (pb.MaintenanceClient, func(), error) { conn, err := c.Dial(endpoint) if err != nil { - return nil, nil, fmt.Errorf("failed to dial endpoint %s with maintenance client: %v", endpoint, err) + return nil, nil, fmt.Errorf("failed to dial endpoint %s with maintenance client: %w", endpoint, err) } - //get token with established connection - dctx := c.ctx - cancel := func() {} - if c.cfg.DialTimeout > 0 { - dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout) - } - err = c.getToken(dctx) - cancel() - if err != nil { - return nil, nil, fmt.Errorf("failed to getToken from endpoint %s with maintenance client: %v", endpoint, err) - } - cancel = func() { conn.Close() } + cancel := func() { conn.Close() } return RetryMaintenanceClient(c, conn), cancel, nil }, remote: RetryMaintenanceClient(c, c.conn), @@ -107,7 +133,6 @@ func NewMaintenance(c *Client) Maintenance { func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance { api := &maintenance{ - lg: c.lg, dial: func(string) (pb.MaintenanceClient, func(), error) { return remote, func() {}, nil }, @@ -115,6 +140,7 @@ func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) } if c != nil { api.callOpts = c.callOpts + api.lg = c.lg } return api } @@ -129,7 +155,7 @@ func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) { if err == nil { return (*AlarmResponse)(resp), nil } - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) { @@ -142,13 +168,13 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE { ar, err := m.AlarmList(ctx) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } ret := AlarmResponse{} for _, am := range ar.Alarms { dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am)) if derr != nil { - return nil, toErr(ctx, derr) + return nil, ContextError(ctx, derr) } ret.Alarms = append(ret.Alarms, dresp.Alarms...) } @@ -159,18 +185,18 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR if err == nil { return (*AlarmResponse)(resp), nil } - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) { remote, cancel, err := m.dial(endpoint) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } defer cancel() resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } return (*DefragmentResponse)(resp), nil } @@ -178,12 +204,12 @@ func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*Defragm func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) { remote, cancel, err := m.dial(endpoint) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } defer cancel() resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...) 
if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } return (*StatusResponse)(resp), nil } @@ -191,46 +217,78 @@ func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusRespo func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) { remote, cancel, err := m.dial(endpoint) if err != nil { - - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } defer cancel() resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } return (*HashKVResponse)(resp), nil } -func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { +func (m *maintenance) SnapshotWithVersion(ctx context.Context) (*SnapshotResponse, error) { ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } m.lg.Info("opened snapshot stream; downloading") pr, pw := io.Pipe() + + resp, err := ss.Recv() + if err != nil { + m.logAndCloseWithError(err, pw) + return nil, err + } go func() { + // Saving response is blocking + err := m.save(resp, pw) + if err != nil { + m.logAndCloseWithError(err, pw) + return + } for { - resp, err := ss.Recv() + sresp, err := ss.Recv() + if err != nil { + m.logAndCloseWithError(err, pw) + return + } + + err = m.save(sresp, pw) if err != nil { - switch err { - case io.EOF: - m.lg.Info("completed snapshot read; closing") - default: - m.lg.Warn("failed to receive from snapshot stream; closing", zap.Error(err)) - } - pw.CloseWithError(err) + m.logAndCloseWithError(err, pw) return } + } + }() + + return &SnapshotResponse{ + Header: resp.GetHeader(), + Snapshot: &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, + Version: resp.GetVersion(), + }, nil +} + +func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { + ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...) + if err != nil { + return nil, ContextError(ctx, err) + } - // can "resp == nil && err == nil" - // before we receive snapshot SHA digest? - // No, server sends EOF with an empty response - // after it sends SHA digest at the end + m.lg.Info("opened snapshot stream; downloading") + pr, pw := io.Pipe() - if _, werr := pw.Write(resp.Blob); werr != nil { - pw.CloseWithError(werr) + go func() { + for { + resp, err := ss.Recv() + if err != nil { + m.logAndCloseWithError(err, pw) + return + } + err = m.save(resp, pw) + if err != nil { + m.logAndCloseWithError(err, pw) return } } @@ -238,6 +296,28 @@ func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil } +func (m *maintenance) logAndCloseWithError(err error, pw *io.PipeWriter) { + switch { + case errors.Is(err, io.EOF): + m.lg.Info("completed snapshot read; closing") + default: + m.lg.Warn("failed to receive from snapshot stream; closing", zap.Error(err)) + } + pw.CloseWithError(err) +} + +func (m *maintenance) save(resp *pb.SnapshotResponse, pw *io.PipeWriter) error { + // can "resp == nil && err == nil" + // before we receive snapshot SHA digest? 
+ // No, server sends EOF with an empty response + // after it sends SHA digest at the end + + if _, werr := pw.Write(resp.Blob); werr != nil { + return werr + } + return nil +} + type snapshotReadCloser struct { ctx context.Context io.ReadCloser @@ -245,10 +325,26 @@ type snapshotReadCloser struct { func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) { n, err = rc.ReadCloser.Read(p) - return n, toErr(rc.ctx, err) + return n, ContextError(rc.ctx, err) } func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) { resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...) - return (*MoveLeaderResponse)(resp), toErr(ctx, err) + return (*MoveLeaderResponse)(resp), ContextError(ctx, err) +} + +func (m *maintenance) Downgrade(ctx context.Context, action DowngradeAction, version string) (*DowngradeResponse, error) { + var actionType pb.DowngradeRequest_DowngradeAction + switch action { + case DowngradeValidate: + actionType = pb.DowngradeRequest_VALIDATE + case DowngradeEnable: + actionType = pb.DowngradeRequest_ENABLE + case DowngradeCancel: + actionType = pb.DowngradeRequest_CANCEL + default: + return nil, errors.New("etcdclient: unknown downgrade action") + } + resp, err := m.remote.Downgrade(ctx, &pb.DowngradeRequest{Action: actionType, Version: version}, m.callOpts...) + return (*DowngradeResponse)(resp), ContextError(ctx, err) } diff --git a/vendor/go.etcd.io/etcd/client/v3/op.go b/vendor/go.etcd.io/etcd/client/v3/op.go index e8c0c1e08c964..20cb34f5c3c98 100644 --- a/vendor/go.etcd.io/etcd/client/v3/op.go +++ b/vendor/go.etcd.io/etcd/client/v3/op.go @@ -106,6 +106,9 @@ func (op Op) RangeBytes() []byte { return op.end } // Rev returns the requested revision, if any. func (op Op) Rev() int64 { return op.rev } +// Limit returns limit of the result, if any. +func (op Op) Limit() int64 { return op.limit } + // IsPut returns true iff the operation is a Put. func (op Op) IsPut() bool { return op.t == tPut } @@ -124,6 +127,10 @@ func (op Op) IsKeysOnly() bool { return op.keysOnly } // IsCountOnly returns whether countOnly is set. func (op Op) IsCountOnly() bool { return op.countOnly } +func (op Op) IsOptsWithFromKey() bool { return op.isOptsWithFromKey } + +func (op Op) IsOptsWithPrefix() bool { return op.isOptsWithPrefix } + // MinModRev returns the operation's minimum modify revision. func (op Op) MinModRev() int64 { return op.minModRev } @@ -389,12 +396,12 @@ func getPrefix(key []byte) []byte { // can return 'foo1', 'foo2', and so on. func WithPrefix() OpOption { return func(op *Op) { + op.isOptsWithPrefix = true if len(op.key) == 0 { op.key, op.end = []byte{0}, []byte{0} return } op.end = getPrefix(op.key) - op.isOptsWithPrefix = true } } @@ -418,9 +425,15 @@ func WithFromKey() OpOption { } } -// WithSerializable makes 'Get' request serializable. By default, -// it's linearizable. Serializable requests are better for lower latency -// requirement. +// WithSerializable makes `Get` and `MemberList` requests serializable. +// By default, they are linearizable. Serializable requests are better +// for lower latency requirement, but users should be aware that they +// could get stale data with serializable requests. +// +// In some situations users may want to use serializable requests. For +// example, when adding a new member to a one-node cluster, it's reasonable +// and safe to use serializable request before the new added member gets +// started. 
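// A usage sketch to go with the expanded WithSerializable documentation
// above, assuming an existing *clientv3.Client named cli: a serializable
// read answers from the contacted member's local store and skips the quorum
// round-trip, trading possibly stale data for lower latency.
//
//	resp, err := cli.Get(ctx, "foo", clientv3.WithSerializable())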
func WithSerializable() OpOption { return func(op *Op) { op.serializable = true } } @@ -581,3 +594,19 @@ func IsOptsWithFromKey(opts []OpOption) bool { return ret.isOptsWithFromKey } + +func (op Op) IsSortOptionValid() bool { + if op.sort != nil { + sortOrder := int32(op.sort.Order) + sortTarget := int32(op.sort.Target) + + if _, ok := pb.RangeRequest_SortOrder_name[sortOrder]; !ok { + return false + } + + if _, ok := pb.RangeRequest_SortTarget_name[sortTarget]; !ok { + return false + } + } + return true +} diff --git a/vendor/go.etcd.io/etcd/client/v3/options.go b/vendor/go.etcd.io/etcd/client/v3/options.go index cdae1b16a2aa1..cc10a03d76d1c 100644 --- a/vendor/go.etcd.io/etcd/client/v3/options.go +++ b/vendor/go.etcd.io/etcd/client/v3/options.go @@ -23,7 +23,7 @@ import ( var ( // client-side handling retrying of request failures where data was not written to the wire or - // where server indicates it did not process the data. gRPC default is default is "WaitForReady(false)" + // where server indicates it did not process the data. gRPC default is "WaitForReady(false)" // but for etcd we default to "WaitForReady(true)" to minimize client request error responses due to // transient failures. defaultWaitForReady = grpc.WaitForReady(true) diff --git a/vendor/go.etcd.io/etcd/client/v3/retry.go b/vendor/go.etcd.io/etcd/client/v3/retry.go index 69ecc63147197..9152c53a7d4c5 100644 --- a/vendor/go.etcd.io/etcd/client/v3/retry.go +++ b/vendor/go.etcd.io/etcd/client/v3/retry.go @@ -16,13 +16,14 @@ package clientv3 import ( "context" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" + "errors" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" ) type retryPolicy uint8 @@ -52,7 +53,8 @@ func (rp retryPolicy) String() string { // handle itself even with retries. func isSafeRetryImmutableRPC(err error) bool { eErr := rpctypes.Error(err) - if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable { + var serverErr rpctypes.EtcdError + if errors.As(eErr, &serverErr) && serverErr.Code() != codes.Unavailable { // interrupted by non-transient server-side or gRPC-side error // client cannot handle itself (e.g. rpctypes.ErrCompacted) return false @@ -101,8 +103,9 @@ func RetryKVClient(c *Client) pb.KVClient { kc: pb.NewKVClient(c.conn), } } + func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) { - return rkv.kc.Range(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rkv.kc.Range(ctx, in, append(opts, withRepeatablePolicy())...) } func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) { @@ -133,23 +136,23 @@ func RetryLeaseClient(c *Client) pb.LeaseClient { } func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) { - return rlc.lc.LeaseTimeToLive(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rlc.lc.LeaseTimeToLive(ctx, in, append(opts, withRepeatablePolicy())...) } func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) { - return rlc.lc.LeaseLeases(ctx, in, append(opts, withRetryPolicy(repeatable))...) 
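// errors.As, adopted in isSafeRetryImmutableRPC above, keeps matching when
// the target error is buried under %w wrappers, which the old type
// assertion could not do. A self-contained sketch with an assumed error type:
//
//	type codedErr struct{ code int }
//	func (e codedErr) Error() string { return fmt.Sprintf("code %d", e.code) }
//
//	wrapped := fmt.Errorf("rpc failed: %w", codedErr{code: 14})
//	var ce codedErr
//	fmt.Println(errors.As(wrapped, &ce), ce.code) // true 14
//	_, ok := wrapped.(codedErr)
//	fmt.Println(ok) // false: the assertion only sees the outer wrapper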
+ return rlc.lc.LeaseLeases(ctx, in, append(opts, withRepeatablePolicy())...) } func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) { - return rlc.lc.LeaseGrant(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rlc.lc.LeaseGrant(ctx, in, append(opts, withRepeatablePolicy())...) } func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) { - return rlc.lc.LeaseRevoke(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rlc.lc.LeaseRevoke(ctx, in, append(opts, withRepeatablePolicy())...) } func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) { - return rlc.lc.LeaseKeepAlive(ctx, append(opts, withRetryPolicy(repeatable))...) + return rlc.lc.LeaseKeepAlive(ctx, append(opts, withRepeatablePolicy())...) } type retryClusterClient struct { @@ -164,7 +167,7 @@ func RetryClusterClient(c *Client) pb.ClusterClient { } func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) { - return rcc.cc.MemberList(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rcc.cc.MemberList(ctx, in, append(opts, withRepeatablePolicy())...) } func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) { @@ -195,27 +198,27 @@ func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClie } func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) { - return rmc.mc.Alarm(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rmc.mc.Alarm(ctx, in, append(opts, withRepeatablePolicy())...) } func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) { - return rmc.mc.Status(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rmc.mc.Status(ctx, in, append(opts, withRepeatablePolicy())...) } func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) { - return rmc.mc.Hash(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rmc.mc.Hash(ctx, in, append(opts, withRepeatablePolicy())...) } func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) { - return rmc.mc.HashKV(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rmc.mc.HashKV(ctx, in, append(opts, withRepeatablePolicy())...) } func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) { - return rmc.mc.Snapshot(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rmc.mc.Snapshot(ctx, in, append(opts, withRepeatablePolicy())...) } func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) { - return rmc.mc.MoveLeader(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rmc.mc.MoveLeader(ctx, in, append(opts, withRepeatablePolicy())...) 
} func (rmc *retryMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) { @@ -238,19 +241,19 @@ func RetryAuthClient(c *Client) pb.AuthClient { } func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) { - return rac.ac.UserList(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rac.ac.UserList(ctx, in, append(opts, withRepeatablePolicy())...) } func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) { - return rac.ac.UserGet(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rac.ac.UserGet(ctx, in, append(opts, withRepeatablePolicy())...) } func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) { - return rac.ac.RoleGet(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rac.ac.RoleGet(ctx, in, append(opts, withRepeatablePolicy())...) } func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) { - return rac.ac.RoleList(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rac.ac.RoleList(ctx, in, append(opts, withRepeatablePolicy())...) } func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { diff --git a/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go b/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go index 04f157a1dcbb6..7703e673b0612 100644 --- a/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go +++ b/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go @@ -19,16 +19,18 @@ package clientv3 import ( "context" + "errors" "io" "sync" "time" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + + "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" ) // unaryClientInterceptor returns a new retrying unary client interceptor. @@ -37,7 +39,7 @@ import ( // changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions). func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClientInterceptor { intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) - return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { ctx = withVersion(ctx) grpcOpts, retryOpts := filterCallOptions(opts) callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) @@ -53,6 +55,7 @@ func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClien c.GetLogger().Debug( "retrying of unary invoker", zap.String("target", cc.Target()), + zap.String("method", method), zap.Uint("attempt", attempt), ) lastErr = invoker(ctx, method, req, reply, cc, grpcOpts...) 
@@ -62,6 +65,7 @@ func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClien c.GetLogger().Warn( "retrying of unary invoker failed", zap.String("target", cc.Target()), + zap.String("method", method), zap.Uint("attempt", attempt), zap.Error(lastErr), ) @@ -74,24 +78,18 @@ func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClien continue } if c.shouldRefreshToken(lastErr, callOpts) { - // clear auth token before refreshing it. - // call c.Auth.Authenticate with an invalid token will always fail the auth check on the server-side, - // if the server has not apply the patch of pr #12165 (https://github.com/etcd-io/etcd/pull/12165) - // and a rpctypes.ErrInvalidAuthToken will recursively call c.getToken until system run out of resource. - c.authTokenBundle.UpdateAuthToken("") - - gterr := c.getToken(ctx) - if gterr != nil { + gtErr := c.refreshToken(ctx) + if gtErr != nil { c.GetLogger().Warn( "retrying of unary invoker failed to fetch new auth token", zap.String("target", cc.Target()), - zap.Error(gterr), + zap.Error(gtErr), ) - return gterr // lastErr must be invalid auth token + return gtErr // lastErr must be invalid auth token } continue } - if !isSafeRetry(c.lg, lastErr, callOpts) { + if !isSafeRetry(c, lastErr, callOpts) { return lastErr } } @@ -111,15 +109,12 @@ func (c *Client) streamClientInterceptor(optFuncs ...retryOption) grpc.StreamCli intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { ctx = withVersion(ctx) - // getToken automatically - // TODO(cfc4n): keep this code block, remove codes about getToken in client.go after pr #12165 merged. - if c.authTokenBundle != nil { - // equal to c.Username != "" && c.Password != "" - err := c.getToken(ctx) - if err != nil && rpctypes.Error(err) != rpctypes.ErrAuthNotEnabled { - c.GetLogger().Error("clientv3/retry_interceptor: getToken failed", zap.Error(err)) - return nil, err - } + // getToken automatically. Otherwise, auth token may be invalid after watch reconnection because the token has expired + // (see https://github.com/etcd-io/etcd/issues/11954 for more). + err := c.getToken(ctx) + if err != nil { + c.GetLogger().Error("clientv3/retry_interceptor: getToken failed", zap.Error(err)) + return nil, err } grpcOpts, retryOpts := filterCallOptions(opts) callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) @@ -151,14 +146,31 @@ func (c *Client) streamClientInterceptor(optFuncs ...retryOption) grpc.StreamCli // shouldRefreshToken checks whether there's a need to refresh the token based on the error and callOptions, // and returns a boolean value. 
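// The unary interceptor above boils down to this retry skeleton (invoke,
// retriable, and maxAttempts are placeholder names, not the real ones):
// back off before every attempt after the first, stop on context
// cancelation or a non-retriable error.
//
//	var lastErr error
//	for attempt := uint(0); attempt < maxAttempts; attempt++ {
//		if err := waitRetryBackoff(ctx, attempt, callOpts); err != nil {
//			return err
//		}
//		if lastErr = invoke(ctx); lastErr == nil {
//			return nil
//		}
//		if !retriable(lastErr) {
//			return lastErr
//		}
//	}
//	return lastErr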
func (c *Client) shouldRefreshToken(err error, callOpts *options) bool { - if rpctypes.Error(err) == rpctypes.ErrUserEmpty { + if errors.Is(rpctypes.Error(err), rpctypes.ErrUserEmpty) { // refresh the token when username, password is present but the server returns ErrUserEmpty // which is possible when the client token is cleared somehow return c.authTokenBundle != nil // equal to c.Username != "" && c.Password != "" } return callOpts.retryAuth && - (rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken || rpctypes.Error(err) == rpctypes.ErrAuthOldRevision) + (errors.Is(rpctypes.Error(err), rpctypes.ErrInvalidAuthToken) || errors.Is(rpctypes.Error(err), rpctypes.ErrAuthOldRevision)) +} + +func (c *Client) refreshToken(ctx context.Context) error { + if c.authTokenBundle == nil { + // c.authTokenBundle will be initialized only when + // c.Username != "" && c.Password != "". + // + // When users use the TLS CommonName based authentication, the + // authTokenBundle is always nil. But it's possible for the clients + // to get `rpctypes.ErrAuthOldRevision` response when the clients + // concurrently modify auth data (e.g, addUser, deleteUser etc.). + // In this case, there is no need to refresh the token; instead the + // clients just need to retry the operations (e.g. Put, Delete etc). + return nil + } + + return c.getToken(ctx) } // type serverStreamingRetryingStream is the implementation of grpc.ClientStream that acts as a @@ -167,9 +179,9 @@ func (c *Client) shouldRefreshToken(err error, callOpts *options) bool { type serverStreamingRetryingStream struct { grpc.ClientStream client *Client - bufferedSends []interface{} // single message that the client can sen - receivedGood bool // indicates whether any prior receives were successful - wasClosedSend bool // indicates that CloseSend was closed + bufferedSends []any // single message that the client can sen + receivedGood bool // indicates whether any prior receives were successful + wasClosedSend bool // indicates that CloseSend was closed ctx context.Context callOpts *options streamerCall func(ctx context.Context) (grpc.ClientStream, error) @@ -188,7 +200,7 @@ func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream { return s.ClientStream } -func (s *serverStreamingRetryingStream) SendMsg(m interface{}) error { +func (s *serverStreamingRetryingStream) SendMsg(m any) error { s.mu.Lock() s.bufferedSends = append(s.bufferedSends, m) s.mu.Unlock() @@ -210,7 +222,7 @@ func (s *serverStreamingRetryingStream) Trailer() metadata.MD { return s.getStream().Trailer() } -func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error { +func (s *serverStreamingRetryingStream) RecvMsg(m any) error { attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m) if !attemptRetry { return lastErr // success or hard failure @@ -237,12 +249,12 @@ func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error { return lastErr } -func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) { +func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m any) (bool, error) { s.mu.RLock() wasGood := s.receivedGood s.mu.RUnlock() err := s.getStream().RecvMsg(m) - if err == nil || err == io.EOF { + if err == nil || errors.Is(err, io.EOF) { s.mu.Lock() s.receivedGood = true s.mu.Unlock() @@ -259,18 +271,14 @@ func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{} return true, err } if s.client.shouldRefreshToken(err, s.callOpts) { - // clear auth token to avoid failure when call 
getToken - s.client.authTokenBundle.UpdateAuthToken("") - - gterr := s.client.getToken(s.ctx) - if gterr != nil { - s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gterr)) + gtErr := s.client.refreshToken(s.ctx) + if gtErr != nil { + s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gtErr)) return false, err // return the original error for simplicity } return true, err - } - return isSafeRetry(s.client.lg, err, s.callOpts), err + return isSafeRetry(s.client, err, s.callOpts), err } func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) { @@ -302,7 +310,7 @@ func waitRetryBackoff(ctx context.Context, attempt uint, callOpts *options) erro select { case <-ctx.Done(): timer.Stop() - return contextErrToGrpcErr(ctx.Err()) + return contextErrToGRPCErr(ctx.Err()) case <-timer.C: } } @@ -310,17 +318,28 @@ func waitRetryBackoff(ctx context.Context, attempt uint, callOpts *options) erro } // isSafeRetry returns "true", if request is safe for retry with the given error. -func isSafeRetry(lg *zap.Logger, err error, callOpts *options) bool { +func isSafeRetry(c *Client, err error, callOpts *options) bool { if isContextError(err) { return false } + + // Situation when learner refuses RPC it is supposed to not serve is from the server + // perspective not retryable. + // But for backward-compatibility reasons we need to support situation that + // customer provides mix of learners (not yet voters) and voters with an + // expectation to pick voter in the next attempt. + // TODO: Ideally client should be 'aware' which endpoint represents: leader/voter/learner with high probability. + if errors.Is(err, rpctypes.ErrGRPCNotSupportedForLearner) && len(c.Endpoints()) > 1 { + return true + } + switch callOpts.retryPolicy { case repeatable: return isSafeRetryImmutableRPC(err) case nonRepeatable: return isSafeRetryMutableRPC(err) default: - lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String())) + c.lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String())) return false } } @@ -329,25 +348,23 @@ func isContextError(err error) bool { return status.Code(err) == codes.DeadlineExceeded || status.Code(err) == codes.Canceled } -func contextErrToGrpcErr(err error) error { - switch err { - case context.DeadlineExceeded: - return status.Errorf(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - return status.Errorf(codes.Canceled, err.Error()) +func contextErrToGRPCErr(err error) error { + switch { + case errors.Is(err, context.DeadlineExceeded): + return status.Error(codes.DeadlineExceeded, err.Error()) + case errors.Is(err, context.Canceled): + return status.Error(codes.Canceled, err.Error()) default: - return status.Errorf(codes.Unknown, err.Error()) + return status.Error(codes.Unknown, err.Error()) } } -var ( - defaultOptions = &options{ - retryPolicy: nonRepeatable, - max: 0, // disable - backoffFunc: backoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10), - retryAuth: true, - } -) +var defaultOptions = &options{ + retryPolicy: nonRepeatable, + max: 0, // disable + backoffFunc: backoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10), + retryAuth: true, +} // backoffFunc denotes a family of functions that control the backoff duration between call retries. // @@ -357,10 +374,10 @@ var ( // with the next iteration. type backoffFunc func(attempt uint) time.Duration -// withRetryPolicy sets the retry policy of this call. 
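// status.Error replaces status.Errorf in contextErrToGRPCErr above because
// the message is not a format string: Errorf runs err.Error() through
// fmt.Sprintf, so a literal '%' in the message gets mangled and `go vet`
// flags the non-constant format. Sketch:
//
//	err := errors.New("quota is 100%")
//	status.Errorf(codes.Unknown, err.Error()) // message: "quota is 100%!(NOVERB)"
//	status.Error(codes.Unknown, err.Error())  // message: "quota is 100%"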
-func withRetryPolicy(rp retryPolicy) retryOption { +// withRepeatablePolicy sets the repeatable policy of this call. +func withRepeatablePolicy() retryOption { return retryOption{applyFunc: func(o *options) { - o.retryPolicy = rp + o.retryPolicy = repeatable }} } @@ -371,7 +388,7 @@ func withMax(maxRetries uint) retryOption { }} } -// WithBackoff sets the `BackoffFunc `used to control time between retries. +// WithBackoff sets the `BackoffFunc` used to control time between retries. func withBackoff(bf backoffFunc) retryOption { return retryOption{applyFunc: func(o *options) { o.backoffFunc = bf diff --git a/vendor/go.etcd.io/etcd/client/v3/sort.go b/vendor/go.etcd.io/etcd/client/v3/sort.go index 2bb9d9a13b783..9918ea927fea0 100644 --- a/vendor/go.etcd.io/etcd/client/v3/sort.go +++ b/vendor/go.etcd.io/etcd/client/v3/sort.go @@ -14,8 +14,10 @@ package clientv3 -type SortTarget int -type SortOrder int +type ( + SortTarget int + SortOrder int +) const ( SortNone SortOrder = iota diff --git a/vendor/go.etcd.io/etcd/client/v3/txn.go b/vendor/go.etcd.io/etcd/client/v3/txn.go index 22301fba6b14b..0a57332ac7893 100644 --- a/vendor/go.etcd.io/etcd/client/v3/txn.go +++ b/vendor/go.etcd.io/etcd/client/v3/txn.go @@ -18,22 +18,21 @@ import ( "context" "sync" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "google.golang.org/grpc" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" ) // Txn is the interface that wraps mini-transactions. // -// Txn(context.TODO()).If( -// Compare(Value(k1), ">", v1), -// Compare(Version(k1), "=", 2) -// ).Then( -// OpPut(k2,v2), OpPut(k3,v3) -// ).Else( -// OpPut(k4,v4), OpPut(k5,v5) -// ).Commit() -// +// Txn(context.TODO()).If( +// Compare(Value(k1), ">", v1), +// Compare(Version(k1), "=", 2) +// ).Then( +// OpPut(k2,v2), OpPut(k3,v3) +// ).Else( +// OpPut(k4,v4), OpPut(k5,v5) +// ).Commit() type Txn interface { // If takes a list of comparison. If all comparisons passed in succeed, // the operations passed into Then() will be executed. Or the operations @@ -145,7 +144,7 @@ func (txn *txn) Commit() (*TxnResponse, error) { var err error resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...) if err != nil { - return nil, toErr(txn.ctx, err) + return nil, ContextError(txn.ctx, err) } return (*TxnResponse)(resp), nil } diff --git a/vendor/go.etcd.io/etcd/client/v3/watch.go b/vendor/go.etcd.io/etcd/client/v3/watch.go index b73925ba128af..a46f98b8e287b 100644 --- a/vendor/go.etcd.io/etcd/client/v3/watch.go +++ b/vendor/go.etcd.io/etcd/client/v3/watch.go @@ -21,15 +21,15 @@ import ( "sync" "time" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/mvccpb" - v3rpc "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/api/v3/mvccpb" + v3rpc "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" ) const ( @@ -37,6 +37,13 @@ const ( EventTypePut = mvccpb.PUT closeSendErrTimeout = 250 * time.Millisecond + + // AutoWatchID is the watcher ID passed in WatchStream.Watch when no + // user-provided ID is available. If pass, an ID will automatically be assigned. + AutoWatchID = 0 + + // InvalidWatchID represents an invalid watch ID and prevents duplication with an existing watch. + InvalidWatchID = -1 ) type Event mvccpb.Event @@ -142,12 +149,12 @@ type watcher struct { mu sync.Mutex // streams holds all the active grpc streams keyed by ctx value. 
- streams map[string]*watchGrpcStream + streams map[string]*watchGRPCStream lg *zap.Logger } -// watchGrpcStream tracks all watch resources attached to a single grpc stream. -type watchGrpcStream struct { +// watchGRPCStream tracks all watch resources attached to a single grpc stream. +type watchGRPCStream struct { owner *watcher remote pb.WatchClient callOpts []grpc.CallOption @@ -214,8 +221,7 @@ type watchRequest struct { } // progressRequest is issued by the subscriber to request watch progress -type progressRequest struct { -} +type progressRequest struct{} // watcherStream represents a registered watcher type watcherStream struct { @@ -244,7 +250,7 @@ func NewWatcher(c *Client) Watcher { func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher { w := &watcher{ remote: wc, - streams: make(map[string]*watchGrpcStream), + streams: make(map[string]*watchGRPCStream), } if c != nil { w.callOpts = c.callOpts @@ -254,8 +260,10 @@ func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher { } // never closes -var valCtxCh = make(chan struct{}) -var zeroTime = time.Unix(0, 0) +var ( + valCtxCh = make(chan struct{}) + zeroTime = time.Unix(0, 0) +) // ctx with only the values; never Done type valCtx struct{ context.Context } @@ -264,9 +272,9 @@ func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false } func (vc *valCtx) Done() <-chan struct{} { return valCtxCh } func (vc *valCtx) Err() error { return nil } -func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream { +func (w *watcher) newWatcherGRPCStream(inctx context.Context) *watchGRPCStream { ctx, cancel := context.WithCancel(&valCtx{inctx}) - wgs := &watchGrpcStream{ + wgs := &watchGRPCStream{ owner: w, remote: w.remote, callOpts: w.callOpts, @@ -327,7 +335,7 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch } wgs := w.streams[ctxKey] if wgs == nil { - wgs = w.newWatcherGrpcStream(ctx) + wgs = w.newWatcherGRPCStream(ctx) w.streams[ctxKey] = wgs } donec := wgs.donec @@ -388,7 +396,7 @@ func (w *watcher) Close() (err error) { } } // Consider context.Canceled as a successful close - if err == context.Canceled { + if errors.Is(err, context.Canceled) { err = nil } return err @@ -401,11 +409,11 @@ func (w *watcher) RequestProgress(ctx context.Context) (err error) { w.mu.Lock() if w.streams == nil { w.mu.Unlock() - return fmt.Errorf("no stream found for context") + return errors.New("no stream found for context") } wgs := w.streams[ctxKey] if wgs == nil { - wgs = w.newWatcherGrpcStream(ctx) + wgs = w.newWatcherGRPCStream(ctx) w.streams[ctxKey] = wgs } donec := wgs.donec @@ -428,17 +436,17 @@ func (w *watcher) RequestProgress(ctx context.Context) (err error) { } } -func (w *watchGrpcStream) close() (err error) { +func (w *watchGRPCStream) close() (err error) { w.cancel() <-w.donec select { case err = <-w.errc: default: } - return toErr(w.ctx, err) + return ContextError(w.ctx, err) } -func (w *watcher) closeStream(wgs *watchGrpcStream) { +func (w *watcher) closeStream(wgs *watchGRPCStream) { w.mu.Lock() close(wgs.donec) wgs.cancel() @@ -448,9 +456,9 @@ func (w *watcher) closeStream(wgs *watchGrpcStream) { w.mu.Unlock() } -func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) { +func (w *watchGRPCStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) { // check watch ID for backward compatibility (<= v3.3) - if resp.WatchId == -1 || (resp.Canceled && resp.CancelReason != "") { + if resp.WatchId == InvalidWatchID || 
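// valCtx above is the values-only context trick: keep request-scoped values
// (auth token, metadata) while discarding the caller's deadline and
// cancelation, so the long-lived watch stream outlives any single Watch
// call. A standalone sketch of the same idea; the key type is an assumption
// (Done may legitimately return nil for a context that is never canceled):
//
//	type valOnly struct{ context.Context }
//	func (valOnly) Deadline() (time.Time, bool) { return time.Time{}, false }
//	func (valOnly) Done() <-chan struct{}       { return nil } // never done
//	func (valOnly) Err() error                  { return nil }
//
//	type ctxKey struct{}
//	ctx, cancel := context.WithCancel(context.WithValue(context.Background(), ctxKey{}, "v"))
//	cancel()
//	vc := valOnly{ctx}
//	fmt.Println(vc.Err(), vc.Value(ctxKey{})) // <nil> v (values survive cancel)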
(resp.Canceled && resp.CancelReason != "") { w.closeErr = v3rpc.Error(errors.New(resp.CancelReason)) // failed; no channel close(ws.recvc) @@ -460,7 +468,7 @@ func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream w.substreams[ws.id] = ws } -func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) { +func (w *watchGRPCStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) { select { case ws.outc <- *resp: case <-ws.initReq.ctx.Done(): @@ -469,7 +477,7 @@ func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchRespo close(ws.outc) } -func (w *watchGrpcStream) closeSubstream(ws *watcherStream) { +func (w *watchGRPCStream) closeSubstream(ws *watcherStream) { // send channel response in case stream was never established select { case ws.initReq.retc <- ws.outc: @@ -481,7 +489,7 @@ func (w *watchGrpcStream) closeSubstream(ws *watcherStream) { } else if ws.outc != nil { close(ws.outc) } - if ws.id != -1 { + if ws.id != InvalidWatchID { delete(w.substreams, ws.id) return } @@ -494,7 +502,7 @@ func (w *watchGrpcStream) closeSubstream(ws *watcherStream) { } // run is the root of the goroutines for managing a watcher client -func (w *watchGrpcStream) run() { +func (w *watchGRPCStream) run() { var wc pb.Watch_WatchClient var closeErr error @@ -533,6 +541,7 @@ func (w *watchGrpcStream) run() { cancelSet := make(map[int64]struct{}) var cur *pb.WatchResponse + backoff := time.Millisecond for { select { // Watch() requested @@ -543,7 +552,7 @@ func (w *watchGrpcStream) run() { // TODO: pass custom watch ID? ws := &watcherStream{ initReq: *wreq, - id: -1, + id: InvalidWatchID, outc: outc, // unbuffered so resumes won't cause repeat events recvc: make(chan *WatchResponse), @@ -645,10 +654,11 @@ func (w *watchGrpcStream) run() { // watch client failed on Recv; spawn another if possible case err := <-w.errc: - if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader { + if isHaltErr(w.ctx, err) || errors.Is(ContextError(w.ctx, err), v3rpc.ErrNoLeader) { closeErr = err return } + backoff = w.backoffIfUnavailable(backoff, err) if wc, closeErr = w.newWatchClient(); closeErr != nil { return } @@ -669,7 +679,7 @@ func (w *watchGrpcStream) run() { if len(w.substreams)+len(w.resuming) == 0 { return } - if ws.id != -1 { + if ws.id != InvalidWatchID { // client is closing an established watch; close it on the server proactively instead of waiting // to close when the next message arrives cancelSet[ws.id] = struct{}{} @@ -690,7 +700,7 @@ func (w *watchGrpcStream) run() { // nextResume chooses the next resuming to register with the grpc stream. Abandoned // streams are marked as nil in the queue since the head must wait for its inflight registration. 
-func (w *watchGrpcStream) nextResume() *watcherStream { +func (w *watchGRPCStream) nextResume() *watcherStream { for len(w.resuming) != 0 { if w.resuming[0] != nil { return w.resuming[0] @@ -701,7 +711,7 @@ func (w *watchGrpcStream) nextResume() *watcherStream { } // dispatchEvent sends a WatchResponse to the appropriate watcher stream -func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { +func (w *watchGRPCStream) dispatchEvent(pbresp *pb.WatchResponse) bool { events := make([]*Event, len(pbresp.Events)) for i, ev := range pbresp.Events { events[i] = (*Event)(ev) @@ -716,18 +726,17 @@ func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { cancelReason: pbresp.CancelReason, } - // watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of -1 to + // watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of InvalidWatchID to // indicate they should be broadcast. - if wr.IsProgressNotify() && pbresp.WatchId == -1 { + if wr.IsProgressNotify() && pbresp.WatchId == InvalidWatchID { return w.broadcastResponse(wr) } return w.unicastResponse(wr, pbresp.WatchId) - } // broadcastResponse send a watch response to all watch substreams. -func (w *watchGrpcStream) broadcastResponse(wr *WatchResponse) bool { +func (w *watchGRPCStream) broadcastResponse(wr *WatchResponse) bool { for _, ws := range w.substreams { select { case ws.recvc <- wr: @@ -738,8 +747,8 @@ func (w *watchGrpcStream) broadcastResponse(wr *WatchResponse) bool { } // unicastResponse sends a watch response to a specific watch substream. -func (w *watchGrpcStream) unicastResponse(wr *WatchResponse, watchId int64) bool { - ws, ok := w.substreams[watchId] +func (w *watchGRPCStream) unicastResponse(wr *WatchResponse, watchID int64) bool { + ws, ok := w.substreams[watchID] if !ok { return false } @@ -752,7 +761,7 @@ func (w *watchGrpcStream) unicastResponse(wr *WatchResponse, watchId int64) bool } // serveWatchClient forwards messages from the grpc stream to run() -func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) { +func (w *watchGRPCStream) serveWatchClient(wc pb.Watch_WatchClient) { for { resp, err := wc.Recv() if err != nil { @@ -771,7 +780,7 @@ func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) { } // serveSubstream forwards watch responses from run() to the subscriber -func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) { +func (w *watchGRPCStream) serveSubstream(ws *watcherStream, resumec chan struct{}) { if ws.closing { panic("created substream goroutine but substream is closing") } @@ -839,12 +848,13 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{ } } else { // current progress of watch; <= store revision - nextRev = wr.Header.Revision + nextRev = wr.Header.Revision + 1 } if len(wr.Events) > 0 { nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1 } + ws.initReq.rev = nextRev // created event is already sent above, @@ -867,13 +877,13 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{ // lazily send cancel message if events on missing id } -func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) { +func (w *watchGRPCStream) newWatchClient() (pb.Watch_WatchClient, error) { // mark all substreams as resuming close(w.resumec) w.resumec = make(chan struct{}) w.joinSubstreams() for _, ws := range w.substreams { - ws.id = -1 + ws.id = InvalidWatchID w.resuming = append(w.resuming, ws) } // 
strip out nils, if any @@ -913,7 +923,7 @@ func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) { return wc, nil } -func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} { +func (w *watchGRPCStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} { var wg sync.WaitGroup wg.Add(len(w.resuming)) donec := make(chan struct{}) @@ -950,7 +960,7 @@ func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan str } // joinSubstreams waits for all substream goroutines to complete. -func (w *watchGrpcStream) joinSubstreams() { +func (w *watchGRPCStream) joinSubstreams() { for _, ws := range w.substreams { <-ws.donec } @@ -963,10 +973,25 @@ func (w *watchGrpcStream) joinSubstreams() { var maxBackoff = 100 * time.Millisecond +func (w *watchGRPCStream) backoffIfUnavailable(backoff time.Duration, err error) time.Duration { + if isUnavailableErr(w.ctx, err) { + // retry, but backoff + if backoff < maxBackoff { + // 25% backoff factor + backoff = backoff + backoff/4 + if backoff > maxBackoff { + backoff = maxBackoff + } + } + time.Sleep(backoff) + } + return backoff +} + // openWatchClient retries opening a watch client until success or halt. // manually retry in case "ws==nil && err==nil" // TODO: remove FailFast=false -func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) { +func (w *watchGRPCStream) openWatchClient() (ws pb.Watch_WatchClient, err error) { backoff := time.Millisecond for { select { @@ -983,17 +1008,7 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) if isHaltErr(w.ctx, err) { return nil, v3rpc.Error(err) } - if isUnavailableErr(w.ctx, err) { - // retry, but backoff - if backoff < maxBackoff { - // 25% backoff factor - backoff = backoff + backoff/4 - if backoff > maxBackoff { - backoff = maxBackoff - } - } - time.Sleep(backoff) - } + backoff = w.backoffIfUnavailable(backoff, err) } return ws, nil } @@ -1022,7 +1037,7 @@ func (pr *progressRequest) toPB() *pb.WatchRequest { func streamKeyFromCtx(ctx context.Context) string { if md, ok := metadata.FromOutgoingContext(ctx); ok { - return fmt.Sprintf("%+v", md) + return fmt.Sprintf("%+v", map[string][]string(md)) } return "" } diff --git a/vendor/go.opentelemetry.io/contrib/bridges/prometheus/LICENSE b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/LICENSE index 261eeb9e9f8b2..f1aee0f11001c 100644 --- a/vendor/go.opentelemetry.io/contrib/bridges/prometheus/LICENSE +++ b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
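// backoffIfUnavailable above deduplicates the retry backoff that
// openWatchClient previously inlined: grow the delay by 25% per Unavailable
// error, capped at maxBackoff (100ms). Starting from 1ms the schedule looks
// like this:
//
//	backoff := time.Millisecond
//	for i := 0; i < 25; i++ {
//		backoff += backoff / 4
//		if backoff > 100*time.Millisecond {
//			backoff = 100 * time.Millisecond
//		}
//	}
//	// 1ms, 1.25ms, ~1.56ms, ... reaching the 100ms cap after ~21 growth steps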
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/contrib/bridges/prometheus/producer.go b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/producer.go index 37475c3d6dfb7..4212198f0bdcb 100644 --- a/vendor/go.opentelemetry.io/contrib/bridges/prometheus/producer.go +++ b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/producer.go @@ -13,7 +13,6 @@ import ( "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" - "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/instrumentation" @@ -231,7 +230,7 @@ func convertExponentialBuckets(bucketSpans []*dto.BucketSpan, deltas []int64) me // Increase the count index by the Offset to insert Offset zeroes countIndex += bs.GetOffset() } - for j := uint32(0); j < bs.GetLength(); j++ { + for range bs.GetLength() { // Convert deltas to the cumulative number of observations count += deltas[deltaIndex] deltaIndex++ @@ -367,11 +366,12 @@ func convertExemplar(exemplar *dto.Exemplar) metricdata.Exemplar[float64] { var traceID, spanID []byte // find the trace ID and span ID in attributes, if it exists for _, label := range exemplar.GetLabel() { - if label.GetName() == traceIDLabel { + switch label.GetName() { + case traceIDLabel: traceID = []byte(label.GetValue()) - } else if label.GetName() == spanIDLabel { + case spanIDLabel: spanID = []byte(label.GetValue()) - } else { + default: attrs = append(attrs, attribute.String(label.GetName(), label.GetValue())) } } diff --git a/vendor/go.opentelemetry.io/contrib/exporters/autoexport/LICENSE b/vendor/go.opentelemetry.io/contrib/exporters/autoexport/LICENSE index 261eeb9e9f8b2..f1aee0f11001c 100644 --- a/vendor/go.opentelemetry.io/contrib/exporters/autoexport/LICENSE +++ b/vendor/go.opentelemetry.io/contrib/exporters/autoexport/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
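// `for range bs.GetLength()` in convertExponentialBuckets above is Go
// 1.22's range-over-int: it runs the body exactly n times and drops the
// now-unused index variable. The two forms are equivalent:
//
//	for j := uint32(0); j < n; j++ { body() } // pre-Go 1.22
//	for range n { body() }                    // Go 1.22+, index unused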
+ * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/contrib/exporters/autoexport/logs.go b/vendor/go.opentelemetry.io/contrib/exporters/autoexport/logs.go index dd12d03323e3e..842121561cfc1 100644 --- a/vendor/go.opentelemetry.io/contrib/exporters/autoexport/logs.go +++ b/vendor/go.opentelemetry.io/contrib/exporters/autoexport/logs.go @@ -84,10 +84,10 @@ func init() { return nil, errInvalidOTLPProtocol } }) - RegisterLogExporter("console", func(ctx context.Context) (log.Exporter, error) { + RegisterLogExporter("console", func(context.Context) (log.Exporter, error) { return stdoutlog.New() }) - RegisterLogExporter("none", func(ctx context.Context) (log.Exporter, error) { + RegisterLogExporter("none", func(context.Context) (log.Exporter, error) { return noopLogExporter{}, nil }) } diff --git a/vendor/go.opentelemetry.io/contrib/exporters/autoexport/metrics.go b/vendor/go.opentelemetry.io/contrib/exporters/autoexport/metrics.go index f7543eeed5235..ea486a670cd7f 100644 --- a/vendor/go.opentelemetry.io/contrib/exporters/autoexport/metrics.go +++ b/vendor/go.opentelemetry.io/contrib/exporters/autoexport/metrics.go @@ -15,14 +15,14 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" - - prometheusbridge "go.opentelemetry.io/contrib/bridges/prometheus" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" promexporter "go.opentelemetry.io/otel/exporters/prometheus" "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" "go.opentelemetry.io/otel/sdk/metric" + + prometheusbridge "go.opentelemetry.io/contrib/bridges/prometheus" ) const otelExporterOTLPMetricsProtoEnvKey = "OTEL_EXPORTER_OTLP_METRICS_PROTOCOL" @@ -154,7 +154,7 @@ func init() { } return metric.NewPeriodicReader(r, readerOpts...), nil }) - RegisterMetricReader("none", func(ctx context.Context) (metric.Reader, error) { + RegisterMetricReader("none", func(context.Context) (metric.Reader, error) { return newNoopMetricReader(), nil }) RegisterMetricReader("prometheus", func(ctx context.Context) (metric.Reader, error) { @@ -211,10 +211,10 @@ func init() { return readerWithServer{lis.Addr(), reader, &server}, nil }) - RegisterMetricProducer("prometheus", func(ctx context.Context) (metric.Producer, error) { + RegisterMetricProducer("prometheus", func(context.Context) (metric.Producer, error) { return prometheusbridge.NewMetricProducer(), nil }) - RegisterMetricProducer("none", func(ctx context.Context) 
(metric.Producer, error) { + RegisterMetricProducer("none", func(context.Context) (metric.Producer, error) { return newNoopMetricProducer(), nil }) } diff --git a/vendor/go.opentelemetry.io/contrib/exporters/autoexport/noop.go b/vendor/go.opentelemetry.io/contrib/exporters/autoexport/noop.go index 2a8b173b2668e..36a57c9b806a5 100644 --- a/vendor/go.opentelemetry.io/contrib/exporters/autoexport/noop.go +++ b/vendor/go.opentelemetry.io/contrib/exporters/autoexport/noop.go @@ -18,12 +18,12 @@ type noopSpanExporter struct{} var _ trace.SpanExporter = noopSpanExporter{} // ExportSpans is part of trace.SpanExporter interface. -func (e noopSpanExporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { +func (noopSpanExporter) ExportSpans(context.Context, []trace.ReadOnlySpan) error { return nil } // Shutdown is part of trace.SpanExporter interface. -func (e noopSpanExporter) Shutdown(ctx context.Context) error { +func (noopSpanExporter) Shutdown(context.Context) error { return nil } @@ -51,7 +51,7 @@ func IsNoneMetricReader(e metric.Reader) bool { type noopMetricProducer struct{} -func (e noopMetricProducer) Produce(ctx context.Context) ([]metricdata.ScopeMetrics, error) { +func (noopMetricProducer) Produce(context.Context) ([]metricdata.ScopeMetrics, error) { return nil, nil } @@ -65,17 +65,17 @@ type noopLogExporter struct{} var _ log.Exporter = noopLogExporter{} // ExportSpans is part of log.Exporter interface. -func (e noopLogExporter) Export(ctx context.Context, records []log.Record) error { +func (noopLogExporter) Export(context.Context, []log.Record) error { return nil } // Shutdown is part of log.Exporter interface. -func (e noopLogExporter) Shutdown(ctx context.Context) error { +func (noopLogExporter) Shutdown(context.Context) error { return nil } // ForceFlush is part of log.Exporter interface. -func (e noopLogExporter) ForceFlush(ctx context.Context) error { +func (noopLogExporter) ForceFlush(context.Context) error { return nil } diff --git a/vendor/go.opentelemetry.io/contrib/exporters/autoexport/spans.go b/vendor/go.opentelemetry.io/contrib/exporters/autoexport/spans.go index 8970d18f725f4..3b8232686b11f 100644 --- a/vendor/go.opentelemetry.io/contrib/exporters/autoexport/spans.go +++ b/vendor/go.opentelemetry.io/contrib/exporters/autoexport/spans.go @@ -89,10 +89,10 @@ func init() { return nil, errInvalidOTLPProtocol } }) - RegisterSpanExporter("console", func(ctx context.Context) (trace.SpanExporter, error) { + RegisterSpanExporter("console", func(context.Context) (trace.SpanExporter, error) { return stdouttrace.New() }) - RegisterSpanExporter("none", func(ctx context.Context) (trace.SpanExporter, error) { + RegisterSpanExporter("none", func(context.Context) (trace.SpanExporter, error) { return noopSpanExporter{}, nil }) } diff --git a/vendor/go.opentelemetry.io/contrib/propagators/jaeger/LICENSE b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/LICENSE index 261eeb9e9f8b2..f1aee0f11001c 100644 --- a/vendor/go.opentelemetry.io/contrib/propagators/jaeger/LICENSE +++ b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/contrib/propagators/jaeger/jaeger_propagator.go b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/jaeger_propagator.go index 76e924f8a02bd..086a0d9772041 100644 --- a/vendor/go.opentelemetry.io/contrib/propagators/jaeger/jaeger_propagator.go +++ b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/jaeger_propagator.go @@ -51,18 +51,19 @@ var _ propagation.TextMapPropagator = &Jaeger{} // Inject injects a context to the carrier following jaeger format. // The parent span ID is set to an dummy parent span id as the most implementations do. -func (jaeger Jaeger) Inject(ctx context.Context, carrier propagation.TextMapCarrier) { +func (Jaeger) Inject(ctx context.Context, carrier propagation.TextMapCarrier) { sc := trace.SpanFromContext(ctx).SpanContext() headers := []string{} if !sc.TraceID().IsValid() || !sc.SpanID().IsValid() { return } headers = append(headers, sc.TraceID().String(), sc.SpanID().String(), deprecatedParentSpanID) - if debugFromContext(ctx) { + switch { + case debugFromContext(ctx): headers = append(headers, fmt.Sprintf("%x", flagsDebug|flagsSampled)) - } else if sc.IsSampled() { + case sc.IsSampled(): headers = append(headers, fmt.Sprintf("%x", flagsSampled)) - } else { + default: headers = append(headers, fmt.Sprintf("%x", flagsNotSampled)) } @@ -70,7 +71,7 @@ func (jaeger Jaeger) Inject(ctx context.Context, carrier propagation.TextMapCarr } // Extract extracts a context from the carrier if it contains Jaeger headers. -func (jaeger Jaeger) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context { +func (Jaeger) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context { // extract tracing information if h := carrier.Get(jaegerHeader); h != "" { ctx, sc, err := extract(ctx, h) @@ -151,6 +152,6 @@ func extract(ctx context.Context, headerVal string) (context.Context, trace.Span } // Fields returns the Jaeger header key whose value is set with Inject. 
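// For reference on the Inject rewrite above: the Jaeger header value is
// "{trace-id}:{span-id}:{parent-span-id}:{flags}", with flags rendered in
// hex (1 = sampled, 3 = debug|sampled, 0 = not sampled) and the parent span
// ID pinned to the deprecated dummy "0". Example value (IDs are made up):
//
//	uber-trace-id: 4bf92f3577b34da6a3ce929d0e0e4736:00f067aa0ba902b7:0:1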
-func (jaeger Jaeger) Fields() []string { +func (Jaeger) Fields() []string { return []string{jaegerHeader} } diff --git a/vendor/go.opentelemetry.io/contrib/propagators/jaeger/version.go b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/version.go index aa90e8c580e8f..201eea0f0f243 100644 --- a/vendor/go.opentelemetry.io/contrib/propagators/jaeger/version.go +++ b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/version.go @@ -5,7 +5,7 @@ package jaeger // import "go.opentelemetry.io/contrib/propagators/jaeger" // Version is the current release version of the Jaeger propagator. func Version() string { - return "1.35.0" + return "1.38.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/LICENSE b/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/LICENSE index 261eeb9e9f8b2..f1aee0f11001c 100644 --- a/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/LICENSE +++ b/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/internal/utils/rate_limiter.go b/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/internal/ratelimiter/rate_limiter.go similarity index 96% rename from vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/internal/utils/rate_limiter.go rename to vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/internal/ratelimiter/rate_limiter.go index c452118340461..8dcaabb95a451 100644 --- a/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/internal/utils/rate_limiter.go +++ b/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/internal/ratelimiter/rate_limiter.go @@ -16,7 +16,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -package utils // import "go.opentelemetry.io/contrib/samplers/jaegerremote/internal/utils" +// Package ratelimiter provides a rate limiter. +package ratelimiter // import "go.opentelemetry.io/contrib/samplers/jaegerremote/internal/ratelimiter" import ( "sync" diff --git a/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/internal/utils/http_json.go b/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/internal/utils/http_json.go deleted file mode 100644 index aba63afcdbb30..0000000000000 --- a/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/internal/utils/http_json.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Copyright (c) 2021 The Jaeger Authors. -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package utils provides utilities for the jaegerremote package. -package utils // import "go.opentelemetry.io/contrib/samplers/jaegerremote/internal/utils" - -import ( - "encoding/json" - "fmt" - "io" - "net/http" -) - -// GetJSON makes an HTTP call to the specified URL and parses the returned JSON into `out`. -func GetJSON(url string, out interface{}) error { - resp, err := http.Get(url) //nolint:gosec // False positive G107: Potential HTTP request made with variable url - if err != nil { - return err - } - return ReadJSON(resp, out) -} - -// ReadJSON reads JSON from http.Response and parses it into `out`. 
-func ReadJSON(resp *http.Response, out interface{}) error { - defer resp.Body.Close() - - if resp.StatusCode >= 400 { - body, err := io.ReadAll(resp.Body) - if err != nil { - return err - } - - return fmt.Errorf("status code: %d, body: %s", resp.StatusCode, body) - } - - if out == nil { - _, err := io.Copy(io.Discard, resp.Body) - if err != nil { - return err - } - return nil - } - - decoder := json.NewDecoder(resp.Body) - return decoder.Decode(out) -} diff --git a/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/sampler.go b/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/sampler.go index 06a6117593d30..259f7a669a773 100644 --- a/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/sampler.go +++ b/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/sampler.go @@ -24,10 +24,10 @@ import ( "sync" jaeger_api_v2 "github.com/jaegertracing/jaeger-idl/proto-gen/api_v2" - - "go.opentelemetry.io/contrib/samplers/jaegerremote/internal/utils" "go.opentelemetry.io/otel/sdk/trace" oteltrace "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/contrib/samplers/jaegerremote/internal/ratelimiter" ) const ( @@ -94,7 +94,7 @@ func (s *probabilisticSampler) Description() string { // number of sequential requests can be sampled each second. type rateLimitingSampler struct { maxTracesPerSecond float64 - rateLimiter *utils.RateLimiter + rateLimiter *ratelimiter.RateLimiter } // newRateLimitingSampler creates new rateLimitingSampler. @@ -105,7 +105,7 @@ func newRateLimitingSampler(maxTracesPerSecond float64) *rateLimitingSampler { func (s *rateLimitingSampler) init(maxTracesPerSecond float64) *rateLimitingSampler { if s.rateLimiter == nil { - s.rateLimiter = utils.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0)) + s.rateLimiter = ratelimiter.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0)) } else { s.rateLimiter.Update(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0)) } @@ -143,7 +143,7 @@ func (s *rateLimitingSampler) Equal(other trace.Sampler) bool { return false } -func (s *rateLimitingSampler) Description() string { +func (*rateLimitingSampler) Description() string { return "rateLimitingSampler{}" } @@ -200,7 +200,7 @@ func (s *guaranteedThroughputProbabilisticSampler) update(lowerBound, samplingRa } } -func (s *guaranteedThroughputProbabilisticSampler) Description() string { +func (*guaranteedThroughputProbabilisticSampler) Description() string { return "guaranteedThroughputProbabilisticSampler{}" } @@ -289,7 +289,7 @@ func (s *perOperationSampler) getSamplerForOperation(operation string) trace.Sam return newSampler } -func (s *perOperationSampler) Description() string { +func (*perOperationSampler) Description() string { return "perOperationSampler{}" } diff --git a/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/sampler_remote.go b/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/sampler_remote.go index e803d75e725c0..4cc1531cf5325 100644 --- a/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/sampler_remote.go +++ b/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/sampler_remote.go @@ -30,7 +30,6 @@ import ( "github.com/gogo/protobuf/jsonpb" jaeger_api_v2 "github.com/jaegertracing/jaeger-idl/proto-gen/api_v2" - "go.opentelemetry.io/otel/sdk/trace" ) @@ -49,7 +48,7 @@ type SamplingStrategyFetcher interface { // samplingStrategyParser is used to parse sampling strategy updates. The output object // should be of the type that is recognized by the SamplerUpdaters. 
type samplingStrategyParser interface { - Parse(response []byte) (interface{}, error) + Parse(response []byte) (any, error) } // samplerUpdater is used by Sampler to apply sampling strategies, @@ -62,7 +61,7 @@ type samplingStrategyParser interface { // // Sampler invokes the updaters while holding a lock on the main sampler. type samplerUpdater interface { - Update(sampler trace.Sampler, strategy interface{}) (modified trace.Sampler, err error) + Update(sampler trace.Sampler, strategy any) (modified trace.Sampler, err error) } // Sampler is a delegating sampler that polls a remote server @@ -70,7 +69,7 @@ type samplerUpdater interface { // delegates to it for sampling decisions. type Sampler struct { // These fields must be first in the struct because `sync/atomic` expects 64-bit alignment. - // Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq + // Cf. https://github.com/jaegertracing/jaeger-client-go/issues/155, https://pkg.go.dev/sync/atomic#pkg-note-BUG closed int64 // 0 - not closed, 1 - closed sync.RWMutex // used to serialize access to samplerConfig.sampler @@ -119,7 +118,7 @@ func (s *Sampler) Close() { } // Description returns a human-readable name for the Sampler. -func (s *Sampler) Description() string { +func (*Sampler) Description() string { return "JaegerRemoteSampler{}" } @@ -173,7 +172,7 @@ func (s *Sampler) UpdateSampler() { } // NB: this function should only be called while holding a Write lock. -func (s *Sampler) updateSamplerViaUpdaters(strategy interface{}) error { +func (s *Sampler) updateSamplerViaUpdaters(strategy any) error { for _, updater := range s.updaters { sampler, err := updater.Update(s.sampler, strategy) if err != nil { @@ -193,7 +192,7 @@ func (s *Sampler) updateSamplerViaUpdaters(strategy interface{}) error { type probabilisticSamplerUpdater struct{} // Update implements Update of samplerUpdater. -func (u *probabilisticSamplerUpdater) Update(sampler trace.Sampler, strategy interface{}) (trace.Sampler, error) { +func (*probabilisticSamplerUpdater) Update(sampler trace.Sampler, strategy any) (trace.Sampler, error) { type response interface { GetProbabilisticSampling() *jaeger_api_v2.ProbabilisticSamplingStrategy } @@ -218,7 +217,7 @@ func (u *probabilisticSamplerUpdater) Update(sampler trace.Sampler, strategy int type rateLimitingSamplerUpdater struct{} // Update implements Update of samplerUpdater. -func (u *rateLimitingSamplerUpdater) Update(sampler trace.Sampler, strategy interface{}) (trace.Sampler, error) { +func (*rateLimitingSamplerUpdater) Update(sampler trace.Sampler, strategy any) (trace.Sampler, error) { type response interface { GetRateLimitingSampling() *jaeger_api_v2.RateLimitingSamplingStrategy } @@ -246,7 +245,7 @@ type perOperationSamplerUpdater struct { } // Update implements Update of samplerUpdater. -func (u *perOperationSamplerUpdater) Update(sampler trace.Sampler, strategy interface{}) (trace.Sampler, error) { +func (u *perOperationSamplerUpdater) Update(sampler trace.Sampler, strategy any) (trace.Sampler, error) { type response interface { GetOperationSampling() *jaeger_api_v2.PerOperationSamplingStrategies } @@ -288,7 +287,7 @@ func (f *httpSamplingStrategyFetcher) Fetch(serviceName string) ([]byte, error) v.Set("service", serviceName) uri := f.serverURL + "?" + v.Encode() - resp, err := f.httpClient.Get(uri) // nolint:bodyclose // False-positive. 
+ resp, err := f.httpClient.Get(uri) if err != nil { return nil, err } @@ -310,7 +309,7 @@ func (f *httpSamplingStrategyFetcher) Fetch(serviceName string) ([]byte, error) type samplingStrategyParserImpl struct{} -func (p *samplingStrategyParserImpl) Parse(response []byte) (interface{}, error) { +func (*samplingStrategyParserImpl) Parse(response []byte) (any, error) { strategy := new(jaeger_api_v2.SamplingStrategyResponse) // Official Jaeger Remote Sampling protocol contains enums encoded as strings. // Legacy protocol contains enums as numbers. diff --git a/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/sampler_remote_options.go b/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/sampler_remote_options.go index 14ba52c044bb7..3679b148db3db 100644 --- a/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/sampler_remote_options.go +++ b/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/sampler_remote_options.go @@ -26,7 +26,6 @@ import ( "time" "github.com/go-logr/logr" - "go.opentelemetry.io/otel/sdk/trace" ) diff --git a/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/version.go b/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/version.go index 7badac8e42caa..05d8bf2944021 100644 --- a/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/version.go +++ b/vendor/go.opentelemetry.io/contrib/samplers/jaegerremote/version.go @@ -5,6 +5,6 @@ package jaegerremote // import "go.opentelemetry.io/contrib/samplers/jaegerremot // Version is the current release version of the Jaeger remote sampler. func Version() string { - return "0.30.0" + return "0.32.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/LICENSE index 261eeb9e9f8b2..f1aee0f11001c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/LICENSE +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go index 05abd92eeecb8..d1b31ef2aa65d 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go @@ -5,9 +5,12 @@ package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/o import ( "context" + "errors" "fmt" "time" + collogpb "go.opentelemetry.io/proto/otlp/collector/logs/v1" + logpb "go.opentelemetry.io/proto/otlp/logs/v1" "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc" "google.golang.org/grpc/backoff" @@ -20,8 +23,6 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry" - collogpb "go.opentelemetry.io/proto/otlp/collector/logs/v1" - logpb "go.opentelemetry.io/proto/otlp/logs/v1" ) // The methods of this type are not expected to be called concurrently. @@ -85,11 +86,12 @@ func newGRPCDialOptions(cfg config) []grpc.DialOption { dialOpts = append(dialOpts, grpc.WithDefaultServiceConfig(cfg.serviceConfig.Value)) } // Prioritize GRPCCredentials over Insecure (passing both is an error). - if cfg.gRPCCredentials.Value != nil { + switch { + case cfg.gRPCCredentials.Value != nil: dialOpts = append(dialOpts, grpc.WithTransportCredentials(cfg.gRPCCredentials.Value)) - } else if cfg.insecure.Value { + case cfg.insecure.Value: dialOpts = append(dialOpts, grpc.WithTransportCredentials(insecure.NewCredentials())) - } else { + default: // Default to using the host's root CA. dialOpts = append(dialOpts, grpc.WithTransportCredentials( credentials.NewTLS(nil), @@ -192,7 +194,7 @@ func (c *client) exportContext(parent context.Context) (context.Context, context ) if c.exportTimeout > 0 { - ctx, cancel = context.WithTimeout(parent, c.exportTimeout) + ctx, cancel = context.WithTimeoutCause(parent, c.exportTimeout, errors.New("exporter export timeout")) } else { ctx, cancel = context.WithCancel(parent) } @@ -215,9 +217,9 @@ func newNoopClient() *noopClient { return &noopClient{} } -func (c *noopClient) UploadLogs(context.Context, []*logpb.ResourceLogs) error { return nil } +func (*noopClient) UploadLogs(context.Context, []*logpb.ResourceLogs) error { return nil } -func (c *noopClient) Shutdown(context.Context) error { return nil } +func (*noopClient) Shutdown(context.Context) error { return nil } // retryable returns if err identifies a request that can be retried and a // duration to wait for if an explicit throttle time is included in err. 
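Several hunks in this update swap `context.WithTimeout` for `context.WithTimeoutCause` and have the retry loop's `wait` return `context.Cause(ctx)` instead of `ctx.Err()`. The practical effect, shown in a minimal standalone sketch (requires Go 1.21+): callers that hit the export timeout now see the descriptive cause rather than the generic deadline error.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func main() {
	cause := errors.New("exporter export timeout")
	ctx, cancel := context.WithTimeoutCause(context.Background(), 10*time.Millisecond, cause)
	defer cancel()

	<-ctx.Done()
	fmt.Println(ctx.Err())          // context deadline exceeded
	fmt.Println(context.Cause(ctx)) // exporter export timeout
}
```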
@@ -228,6 +230,8 @@ func retryable(err error) (bool, time.Duration) { func retryableGRPCStatus(s *status.Status) (bool, time.Duration) { switch s.Code() { + // Follows the retryable error codes defined in + // https://opentelemetry.io/docs/specs/otlp/#failures case codes.Canceled, codes.DeadlineExceeded, codes.Aborted, diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go index d0cc79d54ec2e..3fda9fcb0b83d 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go @@ -563,7 +563,7 @@ func loadCertificates(certPath, keyPath string) ([]tls.Certificate, error) { func insecureFromScheme(prev setting[bool], scheme string) setting[bool] { if scheme == "https" { return newSetting(false) - } else if len(scheme) > 0 { + } else if scheme != "" { return newSetting(true) } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/exporter.go index 66895c3a1a081..898eecf773742 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/exporter.go @@ -8,9 +8,10 @@ import ( "sync" "sync/atomic" + logpb "go.opentelemetry.io/proto/otlp/logs/v1" + "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform" "go.opentelemetry.io/otel/sdk/log" - logpb "go.opentelemetry.io/proto/otlp/logs/v1" ) type logClient interface { @@ -88,6 +89,6 @@ func (e *Exporter) Shutdown(ctx context.Context) error { } // ForceFlush does nothing. The Exporter holds no state. -func (e *Exporter) ForceFlush(ctx context.Context) error { +func (*Exporter) ForceFlush(context.Context) error { return nil } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go index 896c3a3032b13..fa59467747fe5 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go @@ -132,7 +132,7 @@ func wait(ctx context.Context, delay time.Duration) error { select { case <-timer.C: default: - return ctx.Err() + return context.Cause(ctx) } case <-timer.C: } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go index 954597340b482..818ecf9e9df75 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go @@ -5,5 +5,5 @@ package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/o // Version is the current release version of the OpenTelemetry OTLP over gRPC logs exporter in use. 
func Version() string { - return "0.12.2" + return "0.14.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/LICENSE index 261eeb9e9f8b2..f1aee0f11001c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/LICENSE +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
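The comment added to `retryableGRPCStatus` above points at the OTLP failure-handling spec. The hunk is truncated here, so as a rough sketch of that classification the remaining cases below are taken from the spec rather than from this diff, and `hasRetryInfo` stands in for the server-supplied `RetryInfo` detail (with throttle delay) that the real function inspects.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
)

// retryable reports whether a gRPC status code should be retried per
// https://opentelemetry.io/docs/specs/otlp/#failures.
func retryable(c codes.Code, hasRetryInfo bool) bool {
	switch c {
	case codes.Canceled,
		codes.DeadlineExceeded,
		codes.Aborted,
		codes.OutOfRange,
		codes.Unavailable,
		codes.DataLoss:
		return true
	case codes.ResourceExhausted:
		// Only retryable when the server signals recovery is possible.
		return hasRetryInfo
	}
	return false
}

func main() {
	fmt.Println(retryable(codes.Unavailable, false))     // true
	fmt.Println(retryable(codes.InvalidArgument, false)) // false
}
```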
\ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go index 3af60258a6586..59be105dbe66c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go @@ -18,12 +18,11 @@ import ( "sync" "time" - "google.golang.org/protobuf/proto" - - "go.opentelemetry.io/otel" collogpb "go.opentelemetry.io/proto/otlp/collector/logs/v1" logpb "go.opentelemetry.io/proto/otlp/logs/v1" + "google.golang.org/protobuf/proto" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry" ) @@ -200,7 +199,7 @@ func (c *httpClient) uploadLogs(ctx context.Context, data []*logpb.ResourceLogs) return err } respStr := strings.TrimSpace(respData.String()) - if len(respStr) == 0 { + if respStr == "" { respStr = "(empty)" } bodyErr := fmt.Errorf("body: %s", respStr) @@ -220,7 +219,7 @@ func (c *httpClient) uploadLogs(ctx context.Context, data []*logpb.ResourceLogs) } var gzPool = sync.Pool{ - New: func() interface{} { + New: func() any { w := gzip.NewWriter(io.Discard) return w }, @@ -232,7 +231,7 @@ func (c *httpClient) newRequest(ctx context.Context, body []byte) (request, erro switch c.compression { case NoCompression: - r.ContentLength = (int64)(len(body)) + r.ContentLength = int64(len(body)) req.bodyReader = bodyReader(body) case GzipCompression: // Ensure the content length is not used. @@ -313,7 +312,7 @@ func (e retryableError) Unwrap() error { return e.err } -func (e retryableError) As(target interface{}) bool { +func (e retryableError) As(target any) bool { if e.err == nil { return false } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/exporter.go index f1c8d3ae0a721..4436d0cd86481 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/exporter.go @@ -58,7 +58,7 @@ func (e *Exporter) Export(ctx context.Context, records []log.Record) error { // Shutdown shuts down the Exporter. Calls to Export or ForceFlush will perform // no operation after this is called. -func (e *Exporter) Shutdown(ctx context.Context) error { +func (e *Exporter) Shutdown(context.Context) error { if e.stopped.Swap(true) { return nil } @@ -68,6 +68,6 @@ func (e *Exporter) Shutdown(ctx context.Context) error { } // ForceFlush does nothing. The Exporter holds no state. 
-func (e *Exporter) ForceFlush(ctx context.Context) error { +func (*Exporter) ForceFlush(context.Context) error { return nil } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry/retry.go index bd9a750a1f9d6..a0a9dc1334a89 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry/retry.go @@ -132,7 +132,7 @@ func wait(ctx context.Context, delay time.Duration) error { select { case <-timer.C: default: - return ctx.Err() + return context.Cause(ctx) } case <-timer.C: } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go index 9702a4c0bb5fb..a482321af3e67 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go @@ -5,5 +5,5 @@ package otlploghttp // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/o // Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf logs exporter in use. func Version() string { - return "0.12.2" + return "0.14.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE index 261eeb9e9f8b2..f1aee0f11001c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
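The otlploghttp exporter's `Shutdown` above guards its teardown with `e.stopped.Swap(true)`. Here is a small self-contained sketch of that idempotent-shutdown pattern with `sync/atomic.Bool`; the type and field names are placeholders, not the vendored ones.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type exporter struct {
	stopped atomic.Bool
}

// Shutdown runs real teardown only once: Swap returns the previous value,
// so every call after the first is a no-op.
func (e *exporter) Shutdown() error {
	if e.stopped.Swap(true) {
		return nil
	}
	fmt.Println("shutting down")
	return nil
}

func main() {
	var e exporter
	_ = e.Shutdown() // prints "shutting down"
	_ = e.Shutdown() // no-op
}
```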
\ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go index e0fa0570a8124..492480f8c9ae6 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go @@ -5,8 +5,11 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme import ( "context" + "errors" "time" + colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" + metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -17,8 +20,6 @@ import ( "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry" - colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" - metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) type client struct { @@ -149,7 +150,7 @@ func (c *client) exportContext(parent context.Context) (context.Context, context ) if c.exportTimeout > 0 { - ctx, cancel = context.WithTimeout(parent, c.exportTimeout) + ctx, cancel = context.WithTimeoutCause(parent, c.exportTimeout, errors.New("exporter export timeout")) } else { ctx, cancel = context.WithCancel(parent) } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go index 3977c1f8a6c21..35cdf4661278a 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go @@ -9,12 +9,13 @@ import ( "fmt" "sync" + metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" - metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) // Exporter is a OpenTelemetry metric Exporter using gRPC. @@ -91,7 +92,7 @@ func (e *Exporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) e // This method returns an error if the method is canceled by the passed context. // // This method is safe to call concurrently. -func (e *Exporter) ForceFlush(ctx context.Context) error { +func (*Exporter) ForceFlush(ctx context.Context) error { // The exporter and client hold no state, nothing to flush. return ctx.Err() } @@ -119,7 +120,7 @@ var errShutdown = errors.New("gRPC exporter is shutdown") type shutdownClient struct{} -func (c shutdownClient) err(ctx context.Context) error { +func (shutdownClient) err(ctx context.Context) error { if err := ctx.Err(); err != nil { return err } @@ -135,7 +136,7 @@ func (c shutdownClient) Shutdown(ctx context.Context) error { } // MarshalLog returns logging data about the Exporter. 
-func (e *Exporter) MarshalLog() interface{} { +func (*Exporter) MarshalLog() any { return struct{ Type string }{Type: "OTLP/gRPC"} } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go index cb77ae6a9d68e..758d1ea32dab4 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go @@ -105,12 +105,11 @@ func NewHTTPConfig(opts ...HTTPOption) Config { return cfg } -// cleanPath returns a path with all spaces trimmed and all redundancies -// removed. If urlPath is empty or cleaning it results in an empty string, +// cleanPath returns a path with all spaces trimmed. If urlPath is empty, // defaultPath is returned instead. func cleanPath(urlPath string, defaultPath string) string { - tmp := path.Clean(strings.TrimSpace(urlPath)) - if tmp == "." { + tmp := strings.TrimSpace(urlPath) + if tmp == "" || tmp == "." { return defaultPath } if !path.IsAbs(tmp) { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go index 37cc6c519ca61..80691ac3a9f6c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go @@ -132,7 +132,7 @@ func wait(ctx context.Context, delay time.Duration) error { select { case <-timer.C: default: - return ctx.Err() + return context.Cause(ctx) } case <-timer.C: } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go index 58859f2c2b8b6..7909cac56d9a9 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go @@ -5,5 +5,5 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme // Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use. func Version() string { - return "1.36.0" + return "1.38.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/LICENSE index 261eeb9e9f8b2..f1aee0f11001c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/LICENSE +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go index 23f1f00317194..26af47e621b26 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go @@ -18,14 +18,14 @@ import ( "sync" "time" + colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" + metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" "google.golang.org/protobuf/proto" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry" - colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" - metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) type client struct { @@ -203,7 +203,7 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou return err } respStr := strings.TrimSpace(respData.String()) - if len(respStr) == 0 { + if respStr == "" { respStr = "(empty)" } bodyErr := fmt.Errorf("body: %s", respStr) @@ -223,7 +223,7 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou } var gzPool = sync.Pool{ - New: func() interface{} { + New: func() any { w := gzip.NewWriter(io.Discard) return w }, @@ -235,7 +235,7 @@ func (c *client) newRequest(ctx context.Context, body []byte) (request, error) { switch c.compression { case NoCompression: - r.ContentLength = (int64)(len(body)) + r.ContentLength = int64(len(body)) req.bodyReader = bodyReader(body) case GzipCompression: // Ensure the content length is not used. 
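The `gzPool` in the client hunk above (now typed with `any`) creates gzip writers against `io.Discard` and resets them onto the real destination per request, avoiding repeated allocation of the large compressor state. A self-contained sketch of the pattern, with a hypothetical `compress` helper:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"sync"
)

// Writers start against io.Discard; Reset points them at the real buffer.
var gzPool = sync.Pool{
	New: func() any { return gzip.NewWriter(io.Discard) },
}

func compress(body []byte) ([]byte, error) {
	var buf bytes.Buffer
	gz := gzPool.Get().(*gzip.Writer)
	defer gzPool.Put(gz)
	gz.Reset(&buf)
	if _, err := gz.Write(body); err != nil {
		return nil, err
	}
	if err := gz.Close(); err != nil { // flush the gzip footer
		return nil, err
	}
	return buf.Bytes(), nil
}

func main() {
	out, _ := compress([]byte("hello"))
	fmt.Println(len(out))
}
```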
@@ -316,7 +316,7 @@ func (e retryableError) Unwrap() error { return e.err } -func (e retryableError) As(target interface{}) bool { +func (e retryableError) As(target any) bool { if e.err == nil { return false } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go index 50ac8f86ea31d..292645a38ccae 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go @@ -9,12 +9,13 @@ import ( "fmt" "sync" + metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" - metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) // Exporter is a OpenTelemetry metric Exporter using protobufs over HTTP. @@ -91,7 +92,7 @@ func (e *Exporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) e // This method returns an error if the method is canceled by the passed context. // // This method is safe to call concurrently. -func (e *Exporter) ForceFlush(ctx context.Context) error { +func (*Exporter) ForceFlush(ctx context.Context) error { // The exporter and client hold no state, nothing to flush. return ctx.Err() } @@ -119,7 +120,7 @@ var errShutdown = errors.New("HTTP exporter is shutdown") type shutdownClient struct{} -func (c shutdownClient) err(ctx context.Context) error { +func (shutdownClient) err(ctx context.Context) error { if err := ctx.Err(); err != nil { return err } @@ -135,7 +136,7 @@ func (c shutdownClient) Shutdown(ctx context.Context) error { } // MarshalLog returns logging data about the Exporter. -func (e *Exporter) MarshalLog() interface{} { +func (*Exporter) MarshalLog() any { return struct{ Type string }{Type: "OTLP/HTTP"} } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go index cfe629a974607..ed66bb0682a07 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go @@ -105,12 +105,11 @@ func NewHTTPConfig(opts ...HTTPOption) Config { return cfg } -// cleanPath returns a path with all spaces trimmed and all redundancies -// removed. If urlPath is empty or cleaning it results in an empty string, +// cleanPath returns a path with all spaces trimmed. If urlPath is empty, // defaultPath is returned instead. func cleanPath(urlPath string, defaultPath string) string { - tmp := path.Clean(strings.TrimSpace(urlPath)) - if tmp == "." { + tmp := strings.TrimSpace(urlPath) + if tmp == "" || tmp == "." 
{ return defaultPath } if !path.IsAbs(tmp) { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go index c855bdc93d6cc..8a5fa80eac617 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go @@ -132,7 +132,7 @@ func wait(ctx context.Context, delay time.Duration) error { select { case <-timer.C: default: - return ctx.Err() + return context.Cause(ctx) } case <-timer.C: } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go index 52853332154dc..b8fe7cb29010e 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go @@ -5,5 +5,5 @@ package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpme // Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf metrics exporter in use. func Version() string { - return "1.36.0" + return "1.38.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/prometheus/LICENSE index 261eeb9e9f8b2..f1aee0f11001c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/LICENSE +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
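Both `oconf` hunks change `cleanPath` to stop running `path.Clean` over the endpoint path, so redundant elements such as doubled slashes now survive untouched. The diff cuts off after the `path.IsAbs` check; this sketch fills in the conventional tail (rooting relative paths with a leading slash) as an assumption, not something shown in the patch.

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// Sketch of the revised helper: trim spaces, fall back to the default for
// empty or "." paths, and (assumed tail) root relative paths.
func cleanPath(urlPath, defaultPath string) string {
	tmp := strings.TrimSpace(urlPath)
	if tmp == "" || tmp == "." {
		return defaultPath
	}
	if !path.IsAbs(tmp) {
		tmp = "/" + tmp
	}
	return tmp
}

func main() {
	fmt.Println(cleanPath("  ", "/v1/metrics"))           // /v1/metrics
	fmt.Println(cleanPath("v1/metrics//", "/v1/metrics")) // /v1/metrics// (no longer collapsed by path.Clean)
}
```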
\ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go index ceb2d63e2a9d7..dc3542637beb1 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go @@ -4,11 +4,11 @@ package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus" import ( - "strings" "sync" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/otlptranslator" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/internal/global" @@ -19,6 +19,7 @@ import ( type config struct { registerer prometheus.Registerer disableTargetInfo bool + translationStrategy otlptranslator.TranslationStrategyOption withoutUnits bool withoutCounterSuffixes bool readerOpts []metric.ManualReaderOption @@ -27,9 +28,9 @@ type config struct { resourceAttributesFilter attribute.Filter } -var logDeprecatedLegacyScheme = sync.OnceFunc(func() { +var logTemporaryDefault = sync.OnceFunc(func() { global.Warn( - "prometheus exporter legacy scheme deprecated: support for the legacy NameValidationScheme will be removed in a future release", + "The default Prometheus naming translation strategy is planned to be changed from otlptranslator.NoUTF8EscapingWithSuffixes to otlptranslator.UnderscoreEscapingWithSuffixes in a future release. Add prometheus.WithTranslationStrategy(otlptranslator.NoUTF8EscapingWithSuffixes) to preserve the existing behavior, or prometheus.WithTranslationStrategy(otlptranslator.UnderscoreEscapingWithSuffixes) to opt into the future default behavior.", ) }) @@ -40,6 +41,30 @@ func newConfig(opts ...Option) config { cfg = opt.apply(cfg) } + if cfg.translationStrategy == "" { + // If no translation strategy was specified, deduce one based on the global + // NameValidationScheme. NOTE: this logic will change in the future, always + // defaulting to UnderscoreEscapingWithSuffixes + + //nolint:staticcheck // NameValidationScheme is deprecated but we still need it for now. + if model.NameValidationScheme == model.UTF8Validation { + logTemporaryDefault() + cfg.translationStrategy = otlptranslator.NoUTF8EscapingWithSuffixes + } else { + cfg.translationStrategy = otlptranslator.UnderscoreEscapingWithSuffixes + } + } else { + // Note, if the translation strategy implies that suffixes should be added, + // the user can still use WithoutUnits and WithoutCounterSuffixes to + // explicitly disable specific suffixes. We do not override their preference + // in this case. However if the chosen strategy disables suffixes, we should + // forcibly disable all of them. + if !cfg.translationStrategy.ShouldAddSuffixes() { + cfg.withoutCounterSuffixes = true + cfg.withoutUnits = true + } + } + if cfg.registerer == nil { cfg.registerer = prometheus.DefaultRegisterer } @@ -97,6 +122,30 @@ func WithoutTargetInfo() Option { }) } +// WithTranslationStrategy provides a standardized way to define how metric and +// label names should be handled during translation to Prometheus format. See: +// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.48.0/specification/metrics/sdk_exporters/prometheus.md#configuration. +// The recommended approach is to use either +// [otlptranslator.UnderscoreEscapingWithSuffixes] for full Prometheus-style +// compatibility or [otlptranslator.NoTranslation] for OpenTelemetry-style names. 
+// +// By default, if the NameValidationScheme variable in +// [github.com/prometheus/common/model] is "legacy", the default strategy is +// [otlptranslator.UnderscoreEscapingWithSuffixes]. If the validation scheme is +// "utf8", then currently the default Strategy is +// [otlptranslator.NoUTF8EscapingWithSuffixes]. +// +// Notice: It is planned that a future release of this SDK will change the +// default to always be [otlptranslator.UnderscoreEscapingWithSuffixes] in all +// circumstances. Users wanting a different translation strategy should specify +// it explicitly. +func WithTranslationStrategy(strategy otlptranslator.TranslationStrategyOption) Option { + return optionFunc(func(cfg config) config { + cfg.translationStrategy = strategy + return cfg + }) +} + // WithoutUnits disables exporter's addition of unit suffixes to metric names, // and will also prevent unit comments from being added in OpenMetrics once // unit comments are supported. @@ -105,6 +154,12 @@ func WithoutTargetInfo() Option { // conventions. For example, the counter metric request.duration, with unit // milliseconds would become request_duration_milliseconds_total. // With this option set, the name would instead be request_duration_total. +// +// Can be used in conjunction with [WithTranslationStrategy] to disable unit +// suffixes in strategies that would otherwise add suffixes, but this behavior +// is not recommended and may be removed in a future release. +// +// Deprecated: Use [WithTranslationStrategy] instead. func WithoutUnits() Option { return optionFunc(func(cfg config) config { cfg.withoutUnits = true @@ -112,12 +167,19 @@ func WithoutUnits() Option { }) } -// WithoutCounterSuffixes disables exporter's addition _total suffixes on counters. +// WithoutCounterSuffixes disables exporter's addition _total suffixes on +// counters. // // By default, metric names include a _total suffix to follow Prometheus naming // conventions. For example, the counter metric happy.people would become // happy_people_total. With this option set, the name would instead be // happy_people. +// +// Can be used in conjunction with [WithTranslationStrategy] to disable counter +// suffixes in strategies that would otherwise add suffixes, but this behavior +// is not recommended and may be removed in a future release. +// +// Deprecated: Use [WithTranslationStrategy] instead. func WithoutCounterSuffixes() Option { return optionFunc(func(cfg config) config { cfg.withoutCounterSuffixes = true @@ -125,9 +187,8 @@ func WithoutCounterSuffixes() Option { }) } -// WithoutScopeInfo configures the Exporter to not export the otel_scope_info metric. -// If not specified, the Exporter will create a otel_scope_info metric containing -// the metrics' Instrumentation Scope, and also add labels about Instrumentation Scope to all metric points. +// WithoutScopeInfo configures the Exporter to not export +// labels about Instrumentation Scope to all metric points. func WithoutScopeInfo() Option { return optionFunc(func(cfg config) config { cfg.disableScopeInfo = true @@ -135,22 +196,13 @@ func WithoutScopeInfo() Option { }) } -// WithNamespace configures the Exporter to prefix metric with the given namespace. -// Metadata metrics such as target_info and otel_scope_info are not prefixed since these -// have special behavior based on their name. +// WithNamespace configures the Exporter to prefix metric with the given +// namespace. Metadata metrics such as target_info are not prefixed since these +// have special behavior based on their name. 
Namespaces will be prepended even +// if [otlptranslator.NoTranslation] is set as a translation strategy. If the provided namespace +// is empty, nothing will be prepended to metric names. func WithNamespace(ns string) Option { return optionFunc(func(cfg config) config { - if model.NameValidationScheme != model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme. - logDeprecatedLegacyScheme() - // Only sanitize if prometheus does not support UTF-8. - ns = model.EscapeName(ns, model.NameEscapingScheme) - } - if !strings.HasSuffix(ns, "_") { - // namespace and metric names should be separated with an underscore, - // adds a trailing underscore if there is not one already. - ns = ns + "_" - } - cfg.namespace = ns return cfg }) diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go index e0959641caf06..0f29c0abbdee5 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go @@ -15,41 +15,31 @@ import ( "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/model" + "github.com/prometheus/otlptranslator" "google.golang.org/protobuf/proto" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/resource" ) const ( - targetInfoMetricName = "target_info" targetInfoDescription = "Target metadata" - scopeInfoMetricName = "otel_scope_info" - scopeInfoDescription = "Instrumentation Scope metadata" - - scopeNameLabel = "otel_scope_name" - scopeVersionLabel = "otel_scope_version" - - traceIDExemplarKey = "trace_id" - spanIDExemplarKey = "span_id" + scopeLabelPrefix = "otel_scope_" + scopeNameLabel = scopeLabelPrefix + "name" + scopeVersionLabel = scopeLabelPrefix + "version" + scopeSchemaLabel = scopeLabelPrefix + "schema_url" ) -var ( - errScopeInvalid = errors.New("invalid scope") - - metricsPool = sync.Pool{ - New: func() interface{} { - return &metricdata.ResourceMetrics{} - }, - } -) +var metricsPool = sync.Pool{ + New: func() any { + return &metricdata.ResourceMetrics{} + }, +} // Exporter is a Prometheus Exporter that embeds the OTel metric.Reader // interface for easy instantiation with a MeterProvider. @@ -58,7 +48,7 @@ type Exporter struct { } // MarshalLog returns logging data about the Exporter. -func (e *Exporter) MarshalLog() interface{} { +func (e *Exporter) MarshalLog() any { const t = "Prometheus exporter" if r, ok := e.Reader.(*metric.ManualReader); ok { @@ -97,16 +87,13 @@ type collector struct { mu sync.Mutex // mu protects all members below from the concurrent access. disableTargetInfo bool targetInfo prometheus.Metric - scopeInfos map[instrumentation.Scope]prometheus.Metric - scopeInfosInvalid map[instrumentation.Scope]struct{} metricFamilies map[string]*dto.MetricFamily resourceKeyVals keyVals + metricNamer otlptranslator.MetricNamer + labelNamer otlptranslator.LabelNamer + unitNamer otlptranslator.UnitNamer } -// prometheus counters MUST have a _total suffix by default: -// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/compatibility/prometheus_and_openmetrics.md -const counterSuffix = "total" - // New returns a Prometheus Exporter. 
func New(opts ...Option) (*Exporter, error) { cfg := newConfig(opts...) @@ -116,17 +103,30 @@ func New(opts ...Option) (*Exporter, error) { // TODO (#3244): Enable some way to configure the reader, but not change temporality. reader := metric.NewManualReader(cfg.readerOpts...) + labelNamer := otlptranslator.LabelNamer{UTF8Allowed: !cfg.translationStrategy.ShouldEscape()} + escapedNamespace := cfg.namespace + if escapedNamespace != "" { + var err error + // If the namespace needs to be escaped, do that now when creating the new + // Collector object. The escaping is not persisted in the Config itself. + escapedNamespace, err = labelNamer.Build(escapedNamespace) + if err != nil { + return nil, err + } + } + collector := &collector{ reader: reader, disableTargetInfo: cfg.disableTargetInfo, withoutUnits: cfg.withoutUnits, withoutCounterSuffixes: cfg.withoutCounterSuffixes, disableScopeInfo: cfg.disableScopeInfo, - scopeInfos: make(map[instrumentation.Scope]prometheus.Metric), - scopeInfosInvalid: make(map[instrumentation.Scope]struct{}), metricFamilies: make(map[string]*dto.MetricFamily), - namespace: cfg.namespace, + namespace: escapedNamespace, resourceAttributesFilter: cfg.resourceAttributesFilter, + metricNamer: otlptranslator.NewMetricNamer(escapedNamespace, cfg.translationStrategy), + unitNamer: otlptranslator.UnitNamer{UTF8Allowed: !cfg.translationStrategy.ShouldEscape()}, + labelNamer: labelNamer, } if err := cfg.registerer.Register(collector); err != nil { @@ -141,7 +141,7 @@ func New(opts ...Option) (*Exporter, error) { } // Describe implements prometheus.Collector. -func (c *collector) Describe(ch chan<- *prometheus.Desc) { +func (*collector) Describe(chan<- *prometheus.Desc) { // The Opentelemetry SDK doesn't have information on which will exist when the collector // is registered. By returning nothing we are an "unchecked" collector in Prometheus, // and assume responsibility for consistency of the metrics produced. @@ -174,7 +174,11 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { defer c.mu.Unlock() if c.targetInfo == nil && !c.disableTargetInfo { - targetInfo, err := createInfoMetric(targetInfoMetricName, targetInfoDescription, metrics.Resource) + targetInfo, err := c.createInfoMetric( + otlptranslator.TargetInfoMetricName, + targetInfoDescription, + metrics.Resource, + ) if err != nil { // If the target info metric is invalid, disable sending it. c.disableTargetInfo = true @@ -191,7 +195,11 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { } if c.resourceAttributesFilter != nil && len(c.resourceKeyVals.keys) == 0 { - c.createResourceAttributes(metrics.Resource) + err := c.createResourceAttributes(metrics.Resource) + if err != nil { + otel.Handle(err) + return + } } for _, scopeMetrics := range metrics.ScopeMetrics { @@ -202,20 +210,19 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { } if !c.disableScopeInfo { - scopeInfo, err := c.scopeInfo(scopeMetrics.Scope) - if errors.Is(err, errScopeInvalid) { - // Do not report the same error multiple times. 
- continue - } + kv.keys = append(kv.keys, scopeNameLabel, scopeVersionLabel, scopeSchemaLabel) + kv.vals = append(kv.vals, scopeMetrics.Scope.Name, scopeMetrics.Scope.Version, scopeMetrics.Scope.SchemaURL) + + attrKeys, attrVals, err := getAttrs(scopeMetrics.Scope.Attributes, c.labelNamer) if err != nil { otel.Handle(err) continue } - - ch <- scopeInfo - - kv.keys = append(kv.keys, scopeNameLabel, scopeVersionLabel) - kv.vals = append(kv.vals, scopeMetrics.Scope.Name, scopeMetrics.Scope.Version) + for i := range attrKeys { + attrKeys[i] = scopeLabelPrefix + attrKeys[i] + } + kv.keys = append(kv.keys, attrKeys...) + kv.vals = append(kv.vals, attrVals...) } kv.keys = append(kv.keys, c.resourceKeyVals.keys...) @@ -226,7 +233,13 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { if typ == nil { continue } - name := c.getName(m, typ) + name, err := c.getName(m) + if err != nil { + // TODO(#7066): Handle this error better. It's not clear this can be + // reached, bad metric names should / will be caught at creation time. + otel.Handle(err) + continue + } drop, help := c.validateMetrics(name, m.Description, typ) if drop { @@ -239,57 +252,135 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { switch v := m.Data.(type) { case metricdata.Histogram[int64]: - addHistogramMetric(ch, v, m, name, kv) + addHistogramMetric(ch, v, m, name, kv, c.labelNamer) case metricdata.Histogram[float64]: - addHistogramMetric(ch, v, m, name, kv) + addHistogramMetric(ch, v, m, name, kv, c.labelNamer) case metricdata.ExponentialHistogram[int64]: - addExponentialHistogramMetric(ch, v, m, name, kv) + addExponentialHistogramMetric(ch, v, m, name, kv, c.labelNamer) case metricdata.ExponentialHistogram[float64]: - addExponentialHistogramMetric(ch, v, m, name, kv) + addExponentialHistogramMetric(ch, v, m, name, kv, c.labelNamer) case metricdata.Sum[int64]: - addSumMetric(ch, v, m, name, kv) + addSumMetric(ch, v, m, name, kv, c.labelNamer) case metricdata.Sum[float64]: - addSumMetric(ch, v, m, name, kv) + addSumMetric(ch, v, m, name, kv, c.labelNamer) case metricdata.Gauge[int64]: - addGaugeMetric(ch, v, m, name, kv) + addGaugeMetric(ch, v, m, name, kv, c.labelNamer) case metricdata.Gauge[float64]: - addGaugeMetric(ch, v, m, name, kv) + addGaugeMetric(ch, v, m, name, kv, c.labelNamer) } } } } +// downscaleExponentialBucket re-aggregates bucket counts when downscaling to a coarser resolution. 
+func downscaleExponentialBucket(bucket metricdata.ExponentialBucket, scaleDelta int32) metricdata.ExponentialBucket { + if len(bucket.Counts) == 0 || scaleDelta < 1 { + return metricdata.ExponentialBucket{ + Offset: bucket.Offset >> scaleDelta, + Counts: append([]uint64(nil), bucket.Counts...), // copy slice + } + } + + // The new offset is scaled down + newOffset := bucket.Offset >> scaleDelta + + // Pre-calculate the new bucket count to avoid growing slice + // Each group of 2^scaleDelta buckets will merge into one bucket + //nolint:gosec // Length is bounded by slice allocation + lastBucketIdx := bucket.Offset + int32(len(bucket.Counts)) - 1 + lastNewIdx := lastBucketIdx >> scaleDelta + newBucketCount := int(lastNewIdx - newOffset + 1) + + if newBucketCount <= 0 { + return metricdata.ExponentialBucket{ + Offset: newOffset, + Counts: []uint64{}, + } + } + + newCounts := make([]uint64, newBucketCount) + + // Merge buckets according to the scale difference + for i, count := range bucket.Counts { + if count == 0 { + continue + } + + // Calculate which new bucket this count belongs to + //nolint:gosec // Index is bounded by loop iteration + originalIdx := bucket.Offset + int32(i) + newIdx := originalIdx >> scaleDelta + + // Calculate the position in the new counts array + position := newIdx - newOffset + //nolint:gosec // Length is bounded by allocation + if position >= 0 && position < int32(len(newCounts)) { + newCounts[position] += count + } + } + + return metricdata.ExponentialBucket{ + Offset: newOffset, + Counts: newCounts, + } +} + func addExponentialHistogramMetric[N int64 | float64]( ch chan<- prometheus.Metric, histogram metricdata.ExponentialHistogram[N], m metricdata.Metrics, name string, kv keyVals, + labelNamer otlptranslator.LabelNamer, ) { for _, dp := range histogram.DataPoints { - keys, values := getAttrs(dp.Attributes) + keys, values, err := getAttrs(dp.Attributes, labelNamer) + if err != nil { + otel.Handle(err) + continue + } keys = append(keys, kv.keys...) values = append(values, kv.vals...) desc := prometheus.NewDesc(name, m.Description, keys, nil) + // Prometheus native histograms support scales in the range [-4, 8] + scale := dp.Scale + if scale < -4 { + // Reject scales below -4 as they cannot be represented in Prometheus + otel.Handle(fmt.Errorf( + "exponential histogram scale %d is below minimum supported scale -4, skipping data point", + scale)) + continue + } + + // If scale > 8, we need to downscale the buckets to match the clamped scale + positiveBucket := dp.PositiveBucket + negativeBucket := dp.NegativeBucket + if scale > 8 { + scaleDelta := scale - 8 + positiveBucket = downscaleExponentialBucket(dp.PositiveBucket, scaleDelta) + negativeBucket = downscaleExponentialBucket(dp.NegativeBucket, scaleDelta) + scale = 8 + } + // From spec: note that Prometheus Native Histograms buckets are indexed by upper boundary while Exponential Histograms are indexed by lower boundary, the result being that the Offset fields are different-by-one. positiveBuckets := make(map[int]int64) - for i, c := range dp.PositiveBucket.Counts { + for i, c := range positiveBucket.Counts { if c > math.MaxInt64 { otel.Handle(fmt.Errorf("positive count %d is too large to be represented as int64", c)) continue } - positiveBuckets[int(dp.PositiveBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above. + positiveBuckets[int(positiveBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above. 
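A worked example of the merge arithmetic in `downscaleExponentialBucket` above (hypothetical values, sketched as it might appear in a test inside this package, where the unexported function is reachable):

```go
// scaleDelta = 2: every 2^2 = 4 adjacent source buckets merge into one.
in := metricdata.ExponentialBucket{Offset: 5, Counts: []uint64{1, 2, 3, 4}}
out := downscaleExponentialBucket(in, 2)
// newOffset = 5 >> 2 = 1; source indexes 5, 6, 7 all shift right to
// index 1 (1+2+3 = 6), and source index 8 shifts to index 2 (4):
// out.Offset == 1, out.Counts == []uint64{6, 4}
```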
} negativeBuckets := make(map[int]int64) - for i, c := range dp.NegativeBucket.Counts { + for i, c := range negativeBucket.Counts { if c > math.MaxInt64 { otel.Handle(fmt.Errorf("negative count %d is too large to be represented as int64", c)) continue } - negativeBuckets[int(dp.NegativeBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above. + negativeBuckets[int(negativeBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above. } m, err := prometheus.NewConstNativeHistogram( @@ -299,7 +390,7 @@ func addExponentialHistogramMetric[N int64 | float64]( positiveBuckets, negativeBuckets, dp.ZeroCount, - dp.Scale, + scale, dp.ZeroThreshold, dp.StartTime, values...) @@ -307,8 +398,7 @@ func addExponentialHistogramMetric[N int64 | float64]( otel.Handle(err) continue } - - // TODO(GiedriusS): add exemplars here after https://github.com/prometheus/client_golang/pull/1654#pullrequestreview-2434669425 is done. + m = addExemplars(m, dp.Exemplars, labelNamer) ch <- m } } @@ -319,9 +409,14 @@ func addHistogramMetric[N int64 | float64]( m metricdata.Metrics, name string, kv keyVals, + labelNamer otlptranslator.LabelNamer, ) { for _, dp := range histogram.DataPoints { - keys, values := getAttrs(dp.Attributes) + keys, values, err := getAttrs(dp.Attributes, labelNamer) + if err != nil { + otel.Handle(err) + continue + } keys = append(keys, kv.keys...) values = append(values, kv.vals...) @@ -338,7 +433,7 @@ func addHistogramMetric[N int64 | float64]( otel.Handle(err) continue } - m = addExemplars(m, dp.Exemplars) + m = addExemplars(m, dp.Exemplars, labelNamer) ch <- m } } @@ -349,6 +444,7 @@ func addSumMetric[N int64 | float64]( m metricdata.Metrics, name string, kv keyVals, + labelNamer otlptranslator.LabelNamer, ) { valueType := prometheus.CounterValue if !sum.IsMonotonic { @@ -356,7 +452,11 @@ func addSumMetric[N int64 | float64]( } for _, dp := range sum.DataPoints { - keys, values := getAttrs(dp.Attributes) + keys, values, err := getAttrs(dp.Attributes, labelNamer) + if err != nil { + otel.Handle(err) + continue + } keys = append(keys, kv.keys...) values = append(values, kv.vals...) @@ -369,7 +469,7 @@ func addSumMetric[N int64 | float64]( // GaugeValues don't support Exemplars at this time // https://github.com/prometheus/client_golang/blob/aef8aedb4b6e1fb8ac1c90790645169125594096/prometheus/metric.go#L199 if valueType != prometheus.GaugeValue { - m = addExemplars(m, dp.Exemplars) + m = addExemplars(m, dp.Exemplars, labelNamer) } ch <- m } @@ -381,9 +481,14 @@ func addGaugeMetric[N int64 | float64]( m metricdata.Metrics, name string, kv keyVals, + labelNamer otlptranslator.LabelNamer, ) { for _, dp := range gauge.DataPoints { - keys, values := getAttrs(dp.Attributes) + keys, values, err := getAttrs(dp.Attributes, labelNamer) + if err != nil { + otel.Handle(err) + continue + } keys = append(keys, kv.keys...) values = append(values, kv.vals...) @@ -399,12 +504,12 @@ func addGaugeMetric[N int64 | float64]( // getAttrs converts the attribute.Set to two lists of matching Prometheus-style // keys and values. -func getAttrs(attrs attribute.Set) ([]string, []string) { +func getAttrs(attrs attribute.Set, labelNamer otlptranslator.LabelNamer) ([]string, []string, error) { keys := make([]string, 0, attrs.Len()) values := make([]string, 0, attrs.Len()) itr := attrs.Iter() - if model.NameValidationScheme == model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme. 
+ if labelNamer.UTF8Allowed { // Do not perform sanitization if prometheus supports UTF-8. for itr.Next() { kv := itr.Attribute() @@ -417,7 +522,11 @@ func getAttrs(attrs attribute.Set) ([]string, []string) { keysMap := make(map[string][]string) for itr.Next() { kv := itr.Attribute() - key := model.EscapeName(string(kv.Key), model.NameEscapingScheme) + key, err := labelNamer.Build(string(kv.Key)) + if err != nil { + // TODO(#7066) Handle this error better. + return nil, nil, err + } if _, ok := keysMap[key]; !ok { keysMap[key] = []string{kv.Value.Emit()} } else { @@ -431,101 +540,32 @@ func getAttrs(attrs attribute.Set) ([]string, []string) { values = append(values, strings.Join(vals, ";")) } } - return keys, values + return keys, values, nil } -func createInfoMetric(name, description string, res *resource.Resource) (prometheus.Metric, error) { - keys, values := getAttrs(*res.Set()) +func (c *collector) createInfoMetric(name, description string, res *resource.Resource) (prometheus.Metric, error) { + keys, values, err := getAttrs(*res.Set(), c.labelNamer) + if err != nil { + return nil, err + } desc := prometheus.NewDesc(name, description, keys, nil) return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...) } -func createScopeInfoMetric(scope instrumentation.Scope) (prometheus.Metric, error) { - attrs := make([]attribute.KeyValue, 0, scope.Attributes.Len()+2) // resource attrs + scope name + scope version - attrs = append(attrs, scope.Attributes.ToSlice()...) - attrs = append(attrs, attribute.String(scopeNameLabel, scope.Name)) - attrs = append(attrs, attribute.String(scopeVersionLabel, scope.Version)) - - keys, values := getAttrs(attribute.NewSet(attrs...)) - desc := prometheus.NewDesc(scopeInfoMetricName, scopeInfoDescription, keys, nil) - return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...) -} - -var unitSuffixes = map[string]string{ - // Time - "d": "days", - "h": "hours", - "min": "minutes", - "s": "seconds", - "ms": "milliseconds", - "us": "microseconds", - "ns": "nanoseconds", - - // Bytes - "By": "bytes", - "KiBy": "kibibytes", - "MiBy": "mebibytes", - "GiBy": "gibibytes", - "TiBy": "tibibytes", - "KBy": "kilobytes", - "MBy": "megabytes", - "GBy": "gigabytes", - "TBy": "terabytes", - - // SI - "m": "meters", - "V": "volts", - "A": "amperes", - "J": "joules", - "W": "watts", - "g": "grams", - - // Misc - "Cel": "celsius", - "Hz": "hertz", - "1": "ratio", - "%": "percent", -} - -// getName returns the sanitized name, prefixed with the namespace and suffixed with unit. -func (c *collector) getName(m metricdata.Metrics, typ *dto.MetricType) string { - name := m.Name - if model.NameValidationScheme != model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme. - // Only sanitize if prometheus does not support UTF-8. - logDeprecatedLegacyScheme() - name = model.EscapeName(name, model.NameEscapingScheme) +// getName returns the sanitized name, translated according to the selected +// TranslationStrategy and namespace option. +func (c *collector) getName(m metricdata.Metrics) (string, error) { + translatorMetric := otlptranslator.Metric{ + Name: m.Name, + Type: c.namingMetricType(m), } - addCounterSuffix := !c.withoutCounterSuffixes && *typ == dto.MetricType_COUNTER - if addCounterSuffix { - // Remove the _total suffix here, as we will re-add the total suffix - // later, and it needs to come after the unit suffix. 
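To make the collision handling in the new `getAttrs` concrete: when escaping is enabled, two distinct OTel attribute keys can sanitize to the same Prometheus label name, and their values are then joined with ";". A sketch (conceptual only, since `getAttrs` is unexported; the join order follows the set's sorted key order):

```go
// Inside this package, with "go.opentelemetry.io/otel/attribute" and
// "github.com/prometheus/otlptranslator" imported.
attrs := attribute.NewSet(
	attribute.String("host.name", "a"), // escapes to "host_name"
	attribute.String("host_name", "b"), // already "host_name"
)
keys, values, _ := getAttrs(attrs, otlptranslator.LabelNamer{UTF8Allowed: false})
// keys   == []string{"host_name"}
// values == []string{"a;b"}
```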
- name = strings.TrimSuffix(name, counterSuffix) - // If the last character is an underscore, or would be converted to an underscore, trim it from the name. - // an underscore will be added back in later. - if convertsToUnderscore(rune(name[len(name)-1])) { - name = name[:len(name)-1] - } - } - if c.namespace != "" { - name = c.namespace + name - } - if suffix, ok := unitSuffixes[m.Unit]; ok && !c.withoutUnits && !strings.HasSuffix(name, suffix) { - name += "_" + suffix + if !c.withoutUnits { + translatorMetric.Unit = m.Unit } - if addCounterSuffix { - name += "_" + counterSuffix - } - return name -} - -// convertsToUnderscore returns true if the character would be converted to an -// underscore when the escaping scheme is underscore escaping. This is meant to -// capture any character that should be considered a "delimiter". -func convertsToUnderscore(b rune) bool { - return (b < 'a' || b > 'z') && (b < 'A' || b > 'Z') && b != ':' && (b < '0' || b > '9') + return c.metricNamer.Build(translatorMetric) } -func (c *collector) metricType(m metricdata.Metrics) *dto.MetricType { +func (*collector) metricType(m metricdata.Metrics) *dto.MetricType { switch v := m.Data.(type) { case metricdata.ExponentialHistogram[int64], metricdata.ExponentialHistogram[float64]: return dto.MetricType_HISTOGRAM.Enum() @@ -547,37 +587,47 @@ func (c *collector) metricType(m metricdata.Metrics) *dto.MetricType { return nil } -func (c *collector) createResourceAttributes(res *resource.Resource) { - c.mu.Lock() - defer c.mu.Unlock() - - resourceAttrs, _ := res.Set().Filter(c.resourceAttributesFilter) - resourceKeys, resourceValues := getAttrs(resourceAttrs) - c.resourceKeyVals = keyVals{keys: resourceKeys, vals: resourceValues} +// namingMetricType provides the metric type for naming purposes. +func (c *collector) namingMetricType(m metricdata.Metrics) otlptranslator.MetricType { + switch v := m.Data.(type) { + case metricdata.ExponentialHistogram[int64], metricdata.ExponentialHistogram[float64]: + return otlptranslator.MetricTypeHistogram + case metricdata.Histogram[int64], metricdata.Histogram[float64]: + return otlptranslator.MetricTypeHistogram + case metricdata.Sum[float64]: + // If counter suffixes are disabled, treat them like non-monotonic + // suffixes for the purposes of naming. + if v.IsMonotonic && !c.withoutCounterSuffixes { + return otlptranslator.MetricTypeMonotonicCounter + } + return otlptranslator.MetricTypeNonMonotonicCounter + case metricdata.Sum[int64]: + // If counter suffixes are disabled, treat them like non-monotonic + // suffixes for the purposes of naming. 
+ if v.IsMonotonic && !c.withoutCounterSuffixes { + return otlptranslator.MetricTypeMonotonicCounter + } + return otlptranslator.MetricTypeNonMonotonicCounter + case metricdata.Gauge[int64], metricdata.Gauge[float64]: + return otlptranslator.MetricTypeGauge + case metricdata.Summary: + return otlptranslator.MetricTypeSummary + } + return otlptranslator.MetricTypeUnknown } -func (c *collector) scopeInfo(scope instrumentation.Scope) (prometheus.Metric, error) { +func (c *collector) createResourceAttributes(res *resource.Resource) error { c.mu.Lock() defer c.mu.Unlock() - scopeInfo, ok := c.scopeInfos[scope] - if ok { - return scopeInfo, nil - } - - if _, ok := c.scopeInfosInvalid[scope]; ok { - return nil, errScopeInvalid - } - - scopeInfo, err := createScopeInfoMetric(scope) + resourceAttrs, _ := res.Set().Filter(c.resourceAttributesFilter) + resourceKeys, resourceValues, err := getAttrs(resourceAttrs, c.labelNamer) if err != nil { - c.scopeInfosInvalid[scope] = struct{}{} - return nil, fmt.Errorf("cannot create scope info metric: %w", err) + return err } - c.scopeInfos[scope] = scopeInfo - - return scopeInfo, nil + c.resourceKeyVals = keyVals{keys: resourceKeys, vals: resourceValues} + return nil } func (c *collector) validateMetrics(name, description string, metricType *dto.MetricType) (drop bool, help string) { @@ -618,16 +668,24 @@ func (c *collector) validateMetrics(name, description string, metricType *dto.Me return false, "" } -func addExemplars[N int64 | float64](m prometheus.Metric, exemplars []metricdata.Exemplar[N]) prometheus.Metric { +func addExemplars[N int64 | float64]( + m prometheus.Metric, + exemplars []metricdata.Exemplar[N], + labelNamer otlptranslator.LabelNamer, +) prometheus.Metric { if len(exemplars) == 0 { return m } promExemplars := make([]prometheus.Exemplar, len(exemplars)) for i, exemplar := range exemplars { - labels := attributesToLabels(exemplar.FilteredAttributes) + labels, err := attributesToLabels(exemplar.FilteredAttributes, labelNamer) + if err != nil { + otel.Handle(err) + return m + } // Overwrite any existing trace ID or span ID attributes - labels[traceIDExemplarKey] = hex.EncodeToString(exemplar.TraceID[:]) - labels[spanIDExemplarKey] = hex.EncodeToString(exemplar.SpanID[:]) + labels[otlptranslator.ExemplarTraceIDKey] = hex.EncodeToString(exemplar.TraceID) + labels[otlptranslator.ExemplarSpanIDKey] = hex.EncodeToString(exemplar.SpanID) promExemplars[i] = prometheus.Exemplar{ Value: float64(exemplar.Value), Timestamp: exemplar.Time, @@ -644,11 +702,14 @@ func addExemplars[N int64 | float64](m prometheus.Metric, exemplars []metricdata return metricWithExemplar } -func attributesToLabels(attrs []attribute.KeyValue) prometheus.Labels { +func attributesToLabels(attrs []attribute.KeyValue, labelNamer otlptranslator.LabelNamer) (prometheus.Labels, error) { labels := make(map[string]string) for _, attr := range attrs { - key := model.EscapeName(string(attr.Key), model.NameEscapingScheme) - labels[key] = attr.Value.Emit() + name, err := labelNamer.Build(string(attr.Key)) + if err != nil { + return nil, err + } + labels[name] = attr.Value.Emit() } - return labels + return labels, nil } diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/LICENSE index 261eeb9e9f8b2..f1aee0f11001c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/LICENSE +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/exporter.go index e2bf9bfa2ea6a..3d48d67081e2c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/exporter.go @@ -67,6 +67,6 @@ func (e *Exporter) Shutdown(context.Context) error { } // ForceFlush performs no action. -func (e *Exporter) ForceFlush(context.Context) error { +func (*Exporter) ForceFlush(context.Context) error { return nil } diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/record.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/record.go index 43aba8a5c0adb..6cb0c8c01d6d6 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/record.go +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/record.go @@ -27,7 +27,7 @@ type value struct { func (v value) MarshalJSON() ([]byte, error) { var jsonVal struct { Type string - Value interface{} + Value any } jsonVal.Type = v.Kind().String() @@ -106,7 +106,7 @@ func (e *Exporter) newRecordJSON(r sdklog.Record) recordJSON { Attributes: make([]keyValue, 0, r.AttributesLen()), - Resource: &res, + Resource: res, Scope: r.InstrumentationScope(), DroppedAttributes: r.DroppedAttributes(), diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/LICENSE index 261eeb9e9f8b2..f1aee0f11001c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/LICENSE +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/doc.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/doc.go index eff7730cdc923..648bc0749fc4c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/doc.go +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/doc.go @@ -3,4 +3,7 @@ // Package stdouttrace contains an OpenTelemetry exporter for tracing // telemetry to be written to an output destination as JSON. +// +// See [go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x] for information about +// the experimental features. package stdouttrace // import "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/counter/counter.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/counter/counter.go new file mode 100644 index 0000000000000..8c780afb024d8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/counter/counter.go @@ -0,0 +1,31 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/counter/counter.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package counter provides a simple counter for generating unique IDs. +// +// This package is used to generate unique IDs while allowing testing packages +// to reset the counter. +package counter // import "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/counter" + +import "sync/atomic" + +// exporterN is a global 0-based count of the number of exporters created. +var exporterN atomic.Int64 + +// NextExporterID returns the next unique ID for an exporter. 
+func NextExporterID() int64 { + const inc = 1 + return exporterN.Add(inc) - inc +} + +// SetExporterID sets the exporter ID counter to v and returns the previous +// value. +// +// This function is useful for testing purposes, allowing you to reset the +// counter. It should not be used in production code. +func SetExporterID(v int64) int64 { + return exporterN.Swap(v) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x/README.md b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x/README.md new file mode 100644 index 0000000000000..6b7d1aec8769f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x/README.md @@ -0,0 +1,36 @@ +# Experimental Features + +The `stdouttrace` exporter contains features that have not yet stabilized in the OpenTelemetry specification. +These features are added to the `stdouttrace` exporter prior to stabilization in the specification so that users can start experimenting with them and provide feedback. + +These features may change in backwards incompatible ways as feedback is applied. +See the [Compatibility and Stability](#compatibility-and-stability) section for more information. + +## Features + +- [Self-Observability](#self-observability) + +### Self-Observability + +The `stdouttrace` exporter provides a self-observability feature that allows you to monitor the SDK itself. + +To opt-in, set the environment variable `OTEL_GO_X_SELF_OBSERVABILITY` to `true`. + +When enabled, the SDK will create the following metrics using the global `MeterProvider`: + +- `otel.sdk.exporter.span.inflight` +- `otel.sdk.exporter.span.exported` +- `otel.sdk.exporter.operation.duration` + +Please see the [Semantic conventions for OpenTelemetry SDK metrics] documentation for more details on these metrics. + +[Semantic conventions for OpenTelemetry SDK metrics]: https://github.com/open-telemetry/semantic-conventions/blob/v1.36.0/docs/otel/sdk-metrics.md + +## Compatibility and Stability + +Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../../VERSIONING.md). +These features may be removed or modified in successive version releases, including patch versions. + +When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. +There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version. +If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support. diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x/x.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x/x.go new file mode 100644 index 0000000000000..55bb98a965840 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x/x.go @@ -0,0 +1,63 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x documents experimental features for [go.opentelemetry.io/otel/exporters/stdout/stdouttrace]. +package x // import "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x" + +import ( + "os" + "strings" +) + +// SelfObservability is an experimental feature flag that determines if SDK +// self-observability metrics are enabled. 
+// +// To enable this feature set the OTEL_GO_X_SELF_OBSERVABILITY environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var SelfObservability = newFeature("SELF_OBSERVABILITY", func(v string) (string, bool) { + if strings.EqualFold(v, "true") { + return v, true + } + return "", false +}) + +// Feature is an experimental feature control flag. It provides a uniform way +// to interact with these feature flags and parse their values. +type Feature[T any] struct { + key string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + return Feature[T]{ + key: envKeyRoot + suffix, + parse: parse, + } +} + +// Key returns the environment variable key that needs to be set to enable the +// feature. +func (f Feature[T]) Key() string { return f.key } + +// Lookup returns the user configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned. +func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. + vRaw := os.Getenv(f.key) + if vRaw == "" { + return v, ok + } + return f.parse(vRaw) +} + +// Enabled reports whether the feature is enabled. +func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/trace.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/trace.go index bdb915ba803e5..d61324d2ee94f 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/trace.go +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/trace.go @@ -6,13 +6,28 @@ package stdouttrace // import "go.opentelemetry.io/otel/exporters/stdout/stdoutt import ( "context" "encoding/json" + "errors" + "fmt" "sync" "time" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/counter" + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/sdk/trace/tracetest" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" ) +// otelComponentType is a name identifying the type of the OpenTelemetry +// component. It is not a standardized OTel component type, so it uses the +// Go package prefixed type name to ensure uniqueness and identity. 
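Both new `internal/x` packages gate self-observability on the same environment variable, with an empty value treated as unset per the SDK environment-variable spec. A sketch of the resulting semantics (conceptual only; the `x` packages are internal and not importable by user code):

```go
// With "os" and "fmt" imported; x refers to either internal/x package.
os.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "TRUE") // matched case-insensitively
fmt.Println(x.SelfObservability.Enabled())        // true

os.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "")     // empty value == unset
fmt.Println(x.SelfObservability.Enabled())        // false
```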
+const otelComponentType = "go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter" + var zeroTime time.Time var _ trace.SpanExporter = &Exporter{} @@ -26,10 +41,45 @@ func New(options ...Option) (*Exporter, error) { enc.SetIndent("", "\t") } - return &Exporter{ + exporter := &Exporter{ encoder: enc, timestamps: cfg.Timestamps, - }, nil + } + + if !x.SelfObservability.Enabled() { + return exporter, nil + } + + exporter.selfObservabilityEnabled = true + exporter.selfObservabilityAttrs = []attribute.KeyValue{ + semconv.OTelComponentName(fmt.Sprintf("%s/%d", otelComponentType, counter.NextExporterID())), + semconv.OTelComponentTypeKey.String(otelComponentType), + } + s := attribute.NewSet(exporter.selfObservabilityAttrs...) + exporter.selfObservabilitySetOpt = metric.WithAttributeSet(s) + + mp := otel.GetMeterProvider() + m := mp.Meter( + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace", + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + + var err, e error + if exporter.spanInflightMetric, e = otelconv.NewSDKExporterSpanInflight(m); e != nil { + e = fmt.Errorf("failed to create span inflight metric: %w", e) + err = errors.Join(err, e) + } + if exporter.spanExportedMetric, e = otelconv.NewSDKExporterSpanExported(m); e != nil { + e = fmt.Errorf("failed to create span exported metric: %w", e) + err = errors.Join(err, e) + } + if exporter.operationDurationMetric, e = otelconv.NewSDKExporterOperationDuration(m); e != nil { + e = fmt.Errorf("failed to create operation duration metric: %w", e) + err = errors.Join(err, e) + } + + return exporter, err } // Exporter is an implementation of trace.SpanSyncer that writes spans to stdout. @@ -40,10 +90,110 @@ type Exporter struct { stoppedMu sync.RWMutex stopped bool + + selfObservabilityEnabled bool + selfObservabilityAttrs []attribute.KeyValue // selfObservability common attributes + selfObservabilitySetOpt metric.MeasurementOption + spanInflightMetric otelconv.SDKExporterSpanInflight + spanExportedMetric otelconv.SDKExporterSpanExported + operationDurationMetric otelconv.SDKExporterOperationDuration } +var ( + measureAttrsPool = sync.Pool{ + New: func() any { + // "component.name" + "component.type" + "error.type" + const n = 1 + 1 + 1 + s := make([]attribute.KeyValue, 0, n) + // Return a pointer to a slice instead of a slice itself + // to avoid allocations on every call. + return &s + }, + } + + addOptPool = &sync.Pool{ + New: func() any { + const n = 1 // WithAttributeSet + o := make([]metric.AddOption, 0, n) + return &o + }, + } + + recordOptPool = &sync.Pool{ + New: func() any { + const n = 1 // WithAttributeSet + o := make([]metric.RecordOption, 0, n) + return &o + }, + } +) + // ExportSpans writes spans in json format to stdout. -func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { +func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) (err error) { + var success int64 + if e.selfObservabilityEnabled { + count := int64(len(spans)) + + addOpt := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *addOpt = (*addOpt)[:0] + addOptPool.Put(addOpt) + }() + + *addOpt = append(*addOpt, e.selfObservabilitySetOpt) + + e.spanInflightMetric.Inst().Add(ctx, count, *addOpt...) + defer func(starting time.Time) { + e.spanInflightMetric.Inst().Add(ctx, -count, *addOpt...) + + // Record the success and duration of the operation. 
+ // + // Do not exclude 0 values, as they are valid and indicate no spans + // were exported which is meaningful for certain aggregations. + e.spanExportedMetric.Inst().Add(ctx, success, *addOpt...) + + mOpt := e.selfObservabilitySetOpt + if err != nil { + // additional attributes for self-observability, + // only spanExportedMetric and operationDurationMetric are supported. + attrs := measureAttrsPool.Get().(*[]attribute.KeyValue) + defer func() { + *attrs = (*attrs)[:0] // reset the slice for reuse + measureAttrsPool.Put(attrs) + }() + *attrs = append(*attrs, e.selfObservabilityAttrs...) + *attrs = append(*attrs, semconv.ErrorType(err)) + + // Do not inefficiently make a copy of attrs by using + // WithAttributes instead of WithAttributeSet. + set := attribute.NewSet(*attrs...) + mOpt = metric.WithAttributeSet(set) + + // Reset addOpt with new attribute set. + *addOpt = append((*addOpt)[:0], mOpt) + + e.spanExportedMetric.Inst().Add( + ctx, + count-success, + *addOpt..., + ) + } + + recordOpt := recordOptPool.Get().(*[]metric.RecordOption) + defer func() { + *recordOpt = (*recordOpt)[:0] + recordOptPool.Put(recordOpt) + }() + + *recordOpt = append(*recordOpt, mOpt) + e.operationDurationMetric.Inst().Record( + ctx, + time.Since(starting).Seconds(), + *recordOpt..., + ) + }(time.Now()) + } + if err := ctx.Err(); err != nil { return err } @@ -75,15 +225,17 @@ func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) } // Encode span stubs, one by one - if err := e.encoder.Encode(stub); err != nil { - return err + if e := e.encoder.Encode(stub); e != nil { + err = errors.Join(err, fmt.Errorf("failed to encode span %d: %w", i, e)) + continue } + success++ } - return nil + return err } // Shutdown is called to stop the exporter, it performs no action. -func (e *Exporter) Shutdown(ctx context.Context) error { +func (e *Exporter) Shutdown(context.Context) error { e.stoppedMu.Lock() e.stopped = true e.stoppedMu.Unlock() @@ -92,7 +244,7 @@ func (e *Exporter) Shutdown(ctx context.Context) error { } // MarshalLog is the marshaling function used by the logging system to represent this Exporter. -func (e *Exporter) MarshalLog() interface{} { +func (e *Exporter) MarshalLog() any { return struct { Type string WithTimestamps bool diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/log/LICENSE index 261eeb9e9f8b2..f1aee0f11001c 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/LICENSE +++ b/vendor/go.opentelemetry.io/otel/sdk/log/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/batch.go b/vendor/go.opentelemetry.io/otel/sdk/log/batch.go index b91741d5882b3..462eb1c3afc2a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/batch.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/batch.go @@ -329,7 +329,7 @@ func (q *queue) TryDequeue(buf []Record, write func([]Record) bool) int { origRead := q.read n := min(len(buf), q.len) - for i := 0; i < n; i++ { + for i := range n { buf[i] = q.read.Value q.read = q.read.Next() } diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/doc.go b/vendor/go.opentelemetry.io/otel/sdk/log/doc.go index 78935de6368fa..a27834a5b3264 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/doc.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/doc.go @@ -30,6 +30,9 @@ should be used to describe the unique runtime environment instrumented code is being run on. That way when multiple instances of the code are collected at a single endpoint their origin is decipherable. +See [go.opentelemetry.io/otel/sdk/log/internal/x] for information about +the experimental features. + See [go.opentelemetry.io/otel/log] for more information about the OpenTelemetry Logs API. */ diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go index 8cef5dde6b5b9..a9d3c439ba35b 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go @@ -119,7 +119,9 @@ func newTimeoutExporter(exp Exporter, timeout time.Duration) Exporter { // Export sets the timeout of ctx before calling the Exporter e wraps. func (e *timeoutExporter) Export(ctx context.Context, records []Record) error { - ctx, cancel := context.WithTimeout(ctx, e.timeout) + // This is only used by the batch processor, and it takes the processor timeout config. + // Thus, the error message points to the processor, so users know they should adjust the processor timeout. + ctx, cancel := context.WithTimeoutCause(ctx, e.timeout, errors.New("processor export timeout")) defer cancel() return e.Exporter.Export(ctx, records) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/filter_processor.go b/vendor/go.opentelemetry.io/otel/sdk/log/filter_processor.go index a39cad9e0245e..283133aba5ad9 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/filter_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/filter_processor.go @@ -30,7 +30,7 @@ import ( // It provides a Processor used to filter out [Record] // that has a [log.Severity] below a threshold. type FilterProcessor interface { - // Enabled returns whether the Processor will process for the given context + // Enabled reports whether the Processor will process for the given context // and param.
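The switch from `context.WithTimeout` to `context.WithTimeoutCause` above is what lets the "processor export timeout" message surface to callers instead of a generic deadline error. A minimal standard-library sketch of that behavior:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeoutCause(context.Background(),
		time.Millisecond, errors.New("processor export timeout"))
	defer cancel()
	<-ctx.Done()
	fmt.Println(ctx.Err())          // context deadline exceeded
	fmt.Println(context.Cause(ctx)) // processor export timeout
}
```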
// // The passed param is likely to be a partial record information being @@ -57,4 +57,5 @@ type FilterProcessor interface { type EnabledParameters struct { InstrumentationScope instrumentation.Scope Severity log.Severity + EventName string } diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/README.md new file mode 100644 index 0000000000000..83e9e7b4cef04 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/README.md @@ -0,0 +1,34 @@ +# Experimental Features + +The Logs SDK contains features that have not yet stabilized in the OpenTelemetry specification. +These features are added to the OpenTelemetry Go Logs SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback. + +These features may change in backwards incompatible ways as feedback is applied. +See the [Compatibility and Stability](#compatibility-and-stability) section for more information. + +## Features + +- [Self-Observability](#self-observability) + +### Self-Observability + +The Logs SDK provides a self-observability feature that allows you to monitor the SDK itself. + +To opt-in, set the environment variable `OTEL_GO_X_SELF_OBSERVABILITY` to `true`. + +When enabled, the SDK will create the following metrics using the global `MeterProvider`: + +- `otel.sdk.log.created` + +Please see the [Semantic conventions for OpenTelemetry SDK metrics] documentation for more details on these metrics. + +[Semantic conventions for OpenTelemetry SDK metrics]: https://github.com/open-telemetry/semantic-conventions/blob/v1.36.0/docs/otel/sdk-metrics.md + +## Compatibility and Stability + +Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md). +These features may be removed or modified in successive version releases, including patch versions. + +When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. +There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version. +If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support. diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/x.go new file mode 100644 index 0000000000000..5f01b275df990 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/x.go @@ -0,0 +1,63 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x documents experimental features for [go.opentelemetry.io/otel/sdk/log]. +package x // import "go.opentelemetry.io/otel/sdk/log/internal/x" + +import ( + "os" + "strings" +) + +// SelfObservability is an experimental feature flag that determines if SDK +// self-observability metrics are enabled. +// +// To enable this feature set the OTEL_GO_X_SELF_OBSERVABILITY environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var SelfObservability = newFeature("SELF_OBSERVABILITY", func(v string) (string, bool) { + if strings.EqualFold(v, "true") { + return v, true + } + return "", false +}) + +// Feature is an experimental feature control flag. It provides a uniform way +// to interact with these feature flags and parse their values.
+type Feature[T any] struct { + key string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + return Feature[T]{ + key: envKeyRoot + suffix, + parse: parse, + } +} + +// Key returns the environment variable key that needs to be set to enable the +// feature. +func (f Feature[T]) Key() string { return f.key } + +// Lookup returns the user configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned. +func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. + vRaw := os.Getenv(f.key) + if vRaw == "" { + return v, ok + } + return f.parse(vRaw) +} + +// Enabled reports whether the feature is enabled. +func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/logger.go b/vendor/go.opentelemetry.io/otel/sdk/log/logger.go index cd3580ec0438d..7dad98c92dd45 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/logger.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/logger.go @@ -5,12 +5,18 @@ package log // import "go.opentelemetry.io/otel/sdk/log" import ( "context" + "fmt" "time" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/log" "go.opentelemetry.io/otel/log/embedded" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/log/internal/x" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" "go.opentelemetry.io/otel/trace" ) @@ -24,13 +30,31 @@ type logger struct { provider *LoggerProvider instrumentationScope instrumentation.Scope + + selfObservabilityEnabled bool + logCreatedMetric otelconv.SDKLogCreated } func newLogger(p *LoggerProvider, scope instrumentation.Scope) *logger { - return &logger{ + l := &logger{ provider: p, instrumentationScope: scope, } + if !x.SelfObservability.Enabled() { + return l + } + l.selfObservabilityEnabled = true + mp := otel.GetMeterProvider() + m := mp.Meter("go.opentelemetry.io/otel/sdk/log", + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(semconv.SchemaURL)) + + var err error + if l.logCreatedMetric, err = otelconv.NewSDKLogCreated(m); err != nil { + err = fmt.Errorf("failed to create log created metric: %w", err) + otel.Handle(err) + } + return l } func (l *logger) Emit(ctx context.Context, r log.Record) { @@ -52,6 +76,7 @@ func (l *logger) Enabled(ctx context.Context, param log.EnabledParameters) bool p := EnabledParameters{ InstrumentationScope: l.instrumentationScope, Severity: param.Severity, + EventName: param.EventName, } // If there are more Processors than FilterProcessors, @@ -83,7 +108,6 @@ func (l *logger) newRecord(ctx context.Context, r log.Record) Record { observedTimestamp: r.ObservedTimestamp(), severity: r.Severity(), severityText: r.SeverityText(), - body: r.Body(), traceID: sc.TraceID(), spanID: sc.SpanID(), @@ -93,7 +117,14 @@ func (l *logger) newRecord(ctx context.Context, r log.Record) Record { scope: &l.instrumentationScope, attributeValueLengthLimit: 
l.provider.attributeValueLengthLimit, attributeCountLimit: l.provider.attributeCountLimit, + allowDupKeys: l.provider.allowDupKeys, } + if l.selfObservabilityEnabled { + l.logCreatedMetric.Add(ctx, 1) + } + + // This ensures we deduplicate key-value collections in the log body + newRecord.SetBody(r.Body()) // This field SHOULD be set once the event is observed by OpenTelemetry. if newRecord.observedTimestamp.IsZero() { diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/provider.go b/vendor/go.opentelemetry.io/otel/sdk/log/provider.go index 359357b7e8995..c69422e12d438 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/provider.go @@ -32,6 +32,7 @@ type providerConfig struct { fltrProcessors []FilterProcessor attrCntLim setting[int] attrValLenLim setting[int] + allowDupKeys setting[bool] } func newProviderConfig(opts []LoggerProviderOption) providerConfig { @@ -67,6 +68,7 @@ type LoggerProvider struct { fltrProcessors []FilterProcessor attributeCountLimit int attributeValueLengthLimit int + allowDupKeys bool loggersMu sync.Mutex loggers map[instrumentation.Scope]*logger @@ -93,6 +95,7 @@ func NewLoggerProvider(opts ...LoggerProviderOption) *LoggerProvider { fltrProcessors: cfg.fltrProcessors, attributeCountLimit: cfg.attrCntLim.Value, attributeValueLengthLimit: cfg.attrValLenLim.Value, + allowDupKeys: cfg.allowDupKeys.Value, } } @@ -254,3 +257,21 @@ func WithAttributeValueLengthLimit(limit int) LoggerProviderOption { return cfg }) } + +// WithAllowKeyDuplication sets whether deduplication is skipped for log attributes or other key-value collections. +// +// By default, the key-value collections within a log record are deduplicated to comply with the OpenTelemetry Specification. +// Deduplication means that if multiple key-value pairs with the same key are present, only a single pair +// is retained and others are discarded. +// +// Disabling deduplication with this option can improve performance, e.g. when adding attributes to the log record. +// +// Note that if you disable deduplication, you are responsible for ensuring that duplicate +// key-value pairs within a single collection are not emitted, +// or that the telemetry receiver can handle such duplicates. +func WithAllowKeyDuplication() LoggerProviderOption { + return loggerProviderOptionFunc(func(cfg providerConfig) providerConfig { + cfg.allowDupKeys = newSetting(true) + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/record.go b/vendor/go.opentelemetry.io/otel/sdk/log/record.go index a13fcac7bd099..9dfd69b645bce 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/record.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/record.go @@ -93,6 +93,9 @@ type Record struct { attributeValueLengthLimit int attributeCountLimit int + // specifies whether we should deduplicate any key-value collections or not + allowDupKeys bool + noCmp [0]func() //nolint: unused // This is indeed used. } @@ -167,7 +170,11 @@ func (r *Record) Body() log.Value { // SetBody sets the body of the log record. func (r *Record) SetBody(v log.Value) { - r.body = v + if !r.allowDupKeys { + r.body = r.dedupeBodyCollections(v) + } else { + r.body = v + } } // WalkAttributes walks all attributes the log record holds by calling f for @@ -192,56 +199,60 @@ func (r *Record) AddAttributes(attrs ...log.KeyValue) { if n == 0 { // Avoid the more complex duplicate map lookups below.
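A usage sketch for the new `WithAllowKeyDuplication` option (hypothetical application code): deduplication stays on by default, with last-value-wins semantics per the record code above; opting out trades that guarantee for speed.

```go
package main

import sdklog "go.opentelemetry.io/otel/sdk/log"

func main() {
	// Default: duplicate keys in attributes and in map-valued bodies are
	// deduplicated (the last value for a key wins).
	lp := sdklog.NewLoggerProvider()

	// Opt out for performance; duplicate keys are then emitted as-is and
	// the receiver must tolerate them.
	fastLP := sdklog.NewLoggerProvider(sdklog.WithAllowKeyDuplication())
	_, _ = lp, fastLP
}
```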
var drop int - attrs, drop = dedup(attrs) - r.setDropped(drop) + if !r.allowDupKeys { + attrs, drop = dedup(attrs) + r.setDropped(drop) + } - attrs, drop = head(attrs, r.attributeCountLimit) + attrs, drop := head(attrs, r.attributeCountLimit) r.addDropped(drop) r.addAttrs(attrs) return } - // Used to find duplicates between attrs and existing attributes in r. - rIndex := r.attrIndex() - defer putIndex(rIndex) + if !r.allowDupKeys { + // Used to find duplicates between attrs and existing attributes in r. + rIndex := r.attrIndex() + defer putIndex(rIndex) - // Unique attrs that need to be added to r. This uses the same underlying - // array as attrs. - // - // Note, do not iterate attrs twice by just calling dedup(attrs) here. - unique := attrs[:0] - // Used to find duplicates within attrs itself. The index value is the - // index of the element in unique. - uIndex := getIndex() - defer putIndex(uIndex) - - // Deduplicate attrs within the scope of all existing attributes. - for _, a := range attrs { - // Last-value-wins for any duplicates in attrs. - idx, found := uIndex[a.Key] - if found { - r.addDropped(1) - unique[idx] = a - continue - } + // Unique attrs that need to be added to r. This uses the same underlying + // array as attrs. + // + // Note, do not iterate attrs twice by just calling dedup(attrs) here. + unique := attrs[:0] + // Used to find duplicates within attrs itself. The index value is the + // index of the element in unique. + uIndex := getIndex() + defer putIndex(uIndex) + + // Deduplicate attrs within the scope of all existing attributes. + for _, a := range attrs { + // Last-value-wins for any duplicates in attrs. + idx, found := uIndex[a.Key] + if found { + r.addDropped(1) + unique[idx] = a + continue + } - idx, found = rIndex[a.Key] - if found { - // New attrs overwrite any existing with the same key. - r.addDropped(1) - if idx < 0 { - r.front[-(idx + 1)] = a + idx, found = rIndex[a.Key] + if found { + // New attrs overwrite any existing with the same key. + r.addDropped(1) + if idx < 0 { + r.front[-(idx + 1)] = a + } else { + r.back[idx] = a + } } else { - r.back[idx] = a + // Unique attribute. + unique = append(unique, a) + uIndex[a.Key] = len(unique) - 1 } - } else { - // Unique attribute. - unique = append(unique, a) - uIndex[a.Key] = len(unique) - 1 } + attrs = unique } - attrs = unique if r.attributeCountLimit > 0 && n+len(attrs) > r.attributeCountLimit { // Truncate the now unique attributes to comply with limit. @@ -297,8 +308,11 @@ func (r *Record) addAttrs(attrs []log.KeyValue) { // SetAttributes sets (and overrides) attributes to the log record. func (r *Record) SetAttributes(attrs ...log.KeyValue) { var drop int - attrs, drop = dedup(attrs) - r.setDropped(drop) + r.setDropped(0) + if !r.allowDupKeys { + attrs, drop = dedup(attrs) + r.setDropped(drop) + } attrs, drop = head(attrs, r.attributeCountLimit) r.addDropped(drop) @@ -387,11 +401,8 @@ func (r *Record) SetTraceFlags(flags trace.TraceFlags) { } // Resource returns the entity that collected the log. -func (r *Record) Resource() resource.Resource { - if r.resource == nil { - return *resource.Empty() - } - return *r.resource +func (r *Record) Resource() *resource.Resource { + return r.resource } // InstrumentationScope returns the scope that the Logger was created with. @@ -429,10 +440,14 @@ func (r *Record) applyValueLimits(val log.Value) log.Value { } val = log.SliceValue(sl...) case log.KindMap: - // Deduplicate then truncate. Do not do at the same time to avoid - // wasted truncation operations. 
- kvs, dropped := dedup(val.AsMap()) - r.addDropped(dropped) + kvs := val.AsMap() + if !r.allowDupKeys { + // Deduplicate then truncate. Do not do at the same time to avoid + // wasted truncation operations. + var dropped int + kvs, dropped = dedup(kvs) + r.addDropped(dropped) + } for i := range kvs { kvs[i] = r.applyAttrLimits(kvs[i]) } @@ -441,6 +456,24 @@ func (r *Record) applyValueLimits(val log.Value) log.Value { return val } +func (r *Record) dedupeBodyCollections(val log.Value) log.Value { + switch val.Kind() { + case log.KindSlice: + sl := val.AsSlice() + for i := range sl { + sl[i] = r.dedupeBodyCollections(sl[i]) + } + val = log.SliceValue(sl...) + case log.KindMap: + kvs, _ := dedup(val.AsMap()) + for i := range kvs { + kvs[i].Value = r.dedupeBodyCollections(kvs[i].Value) + } + val = log.MapValue(kvs...) + } + return val +} + // truncate returns a truncated version of s such that it contains less than // the limit number of characters. Truncation is applied by returning the limit // number of valid characters contained in s. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md deleted file mode 100644 index 82e1f46b4eafa..0000000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Semconv v1.20.0 - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.20.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.20.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go deleted file mode 100644 index 6685c392b50b3..0000000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go +++ /dev/null @@ -1,1198 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" - -import "go.opentelemetry.io/otel/attribute" - -// Describes HTTP attributes. -const ( - // HTTPMethodKey is the attribute Key conforming to the "http.method" - // semantic conventions. It represents the hTTP request method. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'GET', 'POST', 'HEAD' - HTTPMethodKey = attribute.Key("http.method") - - // HTTPStatusCodeKey is the attribute Key conforming to the - // "http.status_code" semantic conventions. It represents the [HTTP - // response status code](https://tools.ietf.org/html/rfc7231#section-6). - // - // Type: int - // RequirementLevel: ConditionallyRequired (If and only if one was - // received/sent.) - // Stability: stable - // Examples: 200 - HTTPStatusCodeKey = attribute.Key("http.status_code") -) - -// HTTPMethod returns an attribute KeyValue conforming to the "http.method" -// semantic conventions. It represents the hTTP request method. -func HTTPMethod(val string) attribute.KeyValue { - return HTTPMethodKey.String(val) -} - -// HTTPStatusCode returns an attribute KeyValue conforming to the -// "http.status_code" semantic conventions. It represents the [HTTP response -// status code](https://tools.ietf.org/html/rfc7231#section-6). -func HTTPStatusCode(val int) attribute.KeyValue { - return HTTPStatusCodeKey.Int(val) -} - -// HTTP Server spans attributes -const ( - // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" - // semantic conventions. 
It represents the URI scheme identifying the used - // protocol. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'http', 'https' - HTTPSchemeKey = attribute.Key("http.scheme") - - // HTTPRouteKey is the attribute Key conforming to the "http.route" - // semantic conventions. It represents the matched route (path template in - // the format used by the respective server framework). See note below - // - // Type: string - // RequirementLevel: ConditionallyRequired (If and only if it's available) - // Stability: stable - // Examples: '/users/:userID?', '{controller}/{action}/{id?}' - // Note: MUST NOT be populated when this is not supported by the HTTP - // server framework as the route attribute should have low-cardinality and - // the URI path can NOT substitute it. - // SHOULD include the [application - // root](/specification/trace/semantic_conventions/http.md#http-server-definitions) - // if there is one. - HTTPRouteKey = attribute.Key("http.route") -) - -// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" -// semantic conventions. It represents the URI scheme identifying the used -// protocol. -func HTTPScheme(val string) attribute.KeyValue { - return HTTPSchemeKey.String(val) -} - -// HTTPRoute returns an attribute KeyValue conforming to the "http.route" -// semantic conventions. It represents the matched route (path template in the -// format used by the respective server framework). See note below -func HTTPRoute(val string) attribute.KeyValue { - return HTTPRouteKey.String(val) -} - -// Attributes for Events represented using Log Records. -const ( - // EventNameKey is the attribute Key conforming to the "event.name" - // semantic conventions. It represents the name identifies the event. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'click', 'exception' - EventNameKey = attribute.Key("event.name") - - // EventDomainKey is the attribute Key conforming to the "event.domain" - // semantic conventions. It represents the domain identifies the business - // context for the events. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - // Note: Events across different domains may have same `event.name`, yet be - // unrelated events. - EventDomainKey = attribute.Key("event.domain") -) - -var ( - // Events from browser apps - EventDomainBrowser = EventDomainKey.String("browser") - // Events from mobile apps - EventDomainDevice = EventDomainKey.String("device") - // Events from Kubernetes - EventDomainK8S = EventDomainKey.String("k8s") -) - -// EventName returns an attribute KeyValue conforming to the "event.name" -// semantic conventions. It represents the name identifies the event. -func EventName(val string) attribute.KeyValue { - return EventNameKey.String(val) -} - -// These attributes may be used for any network related operation. -const ( - // NetTransportKey is the attribute Key conforming to the "net.transport" - // semantic conventions. It represents the transport protocol used. See - // note below. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - NetTransportKey = attribute.Key("net.transport") - - // NetProtocolNameKey is the attribute Key conforming to the - // "net.protocol.name" semantic conventions. It represents the application - // layer protocol used. The value SHOULD be normalized to lowercase. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'amqp', 'http', 'mqtt' - NetProtocolNameKey = attribute.Key("net.protocol.name") - - // NetProtocolVersionKey is the attribute Key conforming to the - // "net.protocol.version" semantic conventions. It represents the version - // of the application layer protocol used. See note below. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '3.1.1' - // Note: `net.protocol.version` refers to the version of the protocol used - // and might be different from the protocol client's version. If the HTTP - // client used has a version of `0.27.2`, but sends HTTP version `1.1`, - // this attribute should be set to `1.1`. - NetProtocolVersionKey = attribute.Key("net.protocol.version") - - // NetSockPeerNameKey is the attribute Key conforming to the - // "net.sock.peer.name" semantic conventions. It represents the remote - // socket peer name. - // - // Type: string - // RequirementLevel: Recommended (If available and different from - // `net.peer.name` and if `net.sock.peer.addr` is set.) - // Stability: stable - // Examples: 'proxy.example.com' - NetSockPeerNameKey = attribute.Key("net.sock.peer.name") - - // NetSockPeerAddrKey is the attribute Key conforming to the - // "net.sock.peer.addr" semantic conventions. It represents the remote - // socket peer address: IPv4 or IPv6 for internet protocols, path for local - // communication, - // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '127.0.0.1', '/tmp/mysql.sock' - NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") - - // NetSockPeerPortKey is the attribute Key conforming to the - // "net.sock.peer.port" semantic conventions. It represents the remote - // socket peer port. - // - // Type: int - // RequirementLevel: Recommended (If defined for the address family and if - // different than `net.peer.port` and if `net.sock.peer.addr` is set.) - // Stability: stable - // Examples: 16456 - NetSockPeerPortKey = attribute.Key("net.sock.peer.port") - - // NetSockFamilyKey is the attribute Key conforming to the - // "net.sock.family" semantic conventions. It represents the protocol - // [address - // family](https://man7.org/linux/man-pages/man7/address_families.7.html) - // which is used for communication. - // - // Type: Enum - // RequirementLevel: ConditionallyRequired (If different than `inet` and if - // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers - // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in - // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support - // instrumentations that follow previous versions of this document.) - // Stability: stable - // Examples: 'inet6', 'bluetooth' - NetSockFamilyKey = attribute.Key("net.sock.family") - - // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" - // semantic conventions. It represents the logical remote hostname, see - // note below. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'example.com' - // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an - // extra DNS lookup. - NetPeerNameKey = attribute.Key("net.peer.name") - - // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" - // semantic conventions. 
It represents the logical remote port number - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 80, 8080, 443 - NetPeerPortKey = attribute.Key("net.peer.port") - - // NetHostNameKey is the attribute Key conforming to the "net.host.name" - // semantic conventions. It represents the logical local hostname or - // similar, see note below. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'localhost' - NetHostNameKey = attribute.Key("net.host.name") - - // NetHostPortKey is the attribute Key conforming to the "net.host.port" - // semantic conventions. It represents the logical local port number, - // preferably the one that the peer used to connect - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 8080 - NetHostPortKey = attribute.Key("net.host.port") - - // NetSockHostAddrKey is the attribute Key conforming to the - // "net.sock.host.addr" semantic conventions. It represents the local - // socket address. Useful in case of a multi-IP host. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '192.168.0.1' - NetSockHostAddrKey = attribute.Key("net.sock.host.addr") - - // NetSockHostPortKey is the attribute Key conforming to the - // "net.sock.host.port" semantic conventions. It represents the local - // socket port number. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If defined for the address - // family and if different than `net.host.port` and if `net.sock.host.addr` - // is set. In other cases, it is still recommended to set this.) - // Stability: stable - // Examples: 35555 - NetSockHostPortKey = attribute.Key("net.sock.host.port") -) - -var ( - // ip_tcp - NetTransportTCP = NetTransportKey.String("ip_tcp") - // ip_udp - NetTransportUDP = NetTransportKey.String("ip_udp") - // Named or anonymous pipe. See note below - NetTransportPipe = NetTransportKey.String("pipe") - // In-process communication - NetTransportInProc = NetTransportKey.String("inproc") - // Something else (non IP-based) - NetTransportOther = NetTransportKey.String("other") -) - -var ( - // IPv4 address - NetSockFamilyInet = NetSockFamilyKey.String("inet") - // IPv6 address - NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") - // Unix domain socket path - NetSockFamilyUnix = NetSockFamilyKey.String("unix") -) - -// NetProtocolName returns an attribute KeyValue conforming to the -// "net.protocol.name" semantic conventions. It represents the application -// layer protocol used. The value SHOULD be normalized to lowercase. -func NetProtocolName(val string) attribute.KeyValue { - return NetProtocolNameKey.String(val) -} - -// NetProtocolVersion returns an attribute KeyValue conforming to the -// "net.protocol.version" semantic conventions. It represents the version of -// the application layer protocol used. See note below. -func NetProtocolVersion(val string) attribute.KeyValue { - return NetProtocolVersionKey.String(val) -} - -// NetSockPeerName returns an attribute KeyValue conforming to the -// "net.sock.peer.name" semantic conventions. It represents the remote socket -// peer name. -func NetSockPeerName(val string) attribute.KeyValue { - return NetSockPeerNameKey.String(val) -} - -// NetSockPeerAddr returns an attribute KeyValue conforming to the -// "net.sock.peer.addr" semantic conventions. 
It represents the remote socket -// peer address: IPv4 or IPv6 for internet protocols, path for local -// communication, -// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). -func NetSockPeerAddr(val string) attribute.KeyValue { - return NetSockPeerAddrKey.String(val) -} - -// NetSockPeerPort returns an attribute KeyValue conforming to the -// "net.sock.peer.port" semantic conventions. It represents the remote socket -// peer port. -func NetSockPeerPort(val int) attribute.KeyValue { - return NetSockPeerPortKey.Int(val) -} - -// NetPeerName returns an attribute KeyValue conforming to the -// "net.peer.name" semantic conventions. It represents the logical remote -// hostname, see note below. -func NetPeerName(val string) attribute.KeyValue { - return NetPeerNameKey.String(val) -} - -// NetPeerPort returns an attribute KeyValue conforming to the -// "net.peer.port" semantic conventions. It represents the logical remote port -// number -func NetPeerPort(val int) attribute.KeyValue { - return NetPeerPortKey.Int(val) -} - -// NetHostName returns an attribute KeyValue conforming to the -// "net.host.name" semantic conventions. It represents the logical local -// hostname or similar, see note below. -func NetHostName(val string) attribute.KeyValue { - return NetHostNameKey.String(val) -} - -// NetHostPort returns an attribute KeyValue conforming to the -// "net.host.port" semantic conventions. It represents the logical local port -// number, preferably the one that the peer used to connect -func NetHostPort(val int) attribute.KeyValue { - return NetHostPortKey.Int(val) -} - -// NetSockHostAddr returns an attribute KeyValue conforming to the -// "net.sock.host.addr" semantic conventions. It represents the local socket -// address. Useful in case of a multi-IP host. -func NetSockHostAddr(val string) attribute.KeyValue { - return NetSockHostAddrKey.String(val) -} - -// NetSockHostPort returns an attribute KeyValue conforming to the -// "net.sock.host.port" semantic conventions. It represents the local socket -// port number. -func NetSockHostPort(val int) attribute.KeyValue { - return NetSockHostPortKey.Int(val) -} - -// These attributes may be used for any network related operation. -const ( - // NetHostConnectionTypeKey is the attribute Key conforming to the - // "net.host.connection.type" semantic conventions. It represents the - // internet connection type currently being used by the host. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'wifi' - NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") - - // NetHostConnectionSubtypeKey is the attribute Key conforming to the - // "net.host.connection.subtype" semantic conventions. It represents the - // this describes more details regarding the connection.type. It may be the - // type of cell technology connection, but it could be used for describing - // details about a wifi connection. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'LTE' - NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") - - // NetHostCarrierNameKey is the attribute Key conforming to the - // "net.host.carrier.name" semantic conventions. It represents the name of - // the mobile carrier. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'sprint' - NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") - - // NetHostCarrierMccKey is the attribute Key conforming to the - // "net.host.carrier.mcc" semantic conventions. It represents the mobile - // carrier country code. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '310' - NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") - - // NetHostCarrierMncKey is the attribute Key conforming to the - // "net.host.carrier.mnc" semantic conventions. It represents the mobile - // carrier network code. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '001' - NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") - - // NetHostCarrierIccKey is the attribute Key conforming to the - // "net.host.carrier.icc" semantic conventions. It represents the ISO - // 3166-1 alpha-2 2-character country code associated with the mobile - // carrier network. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'DE' - NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") -) - -var ( - // wifi - NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") - // wired - NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") - // cell - NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") - // unavailable - NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") - // unknown - NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") -) - -var ( - // GPRS - NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") - // EDGE - NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") - // UMTS - NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") - // CDMA - NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") - // EVDO Rel. 0 - NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") - // EVDO Rev. A - NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") - // CDMA2000 1XRTT - NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") - // HSDPA - NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") - // HSUPA - NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") - // HSPA - NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") - // IDEN - NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") - // EVDO Rev. 
B - NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") - // LTE - NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") - // EHRPD - NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") - // HSPAP - NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") - // GSM - NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") - // TD-SCDMA - NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") - // IWLAN - NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") - // 5G NR (New Radio) - NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") - // 5G NRNSA (New Radio Non-Standalone) - NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") - // LTE CA - NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") -) - -// NetHostCarrierName returns an attribute KeyValue conforming to the -// "net.host.carrier.name" semantic conventions. It represents the name of the -// mobile carrier. -func NetHostCarrierName(val string) attribute.KeyValue { - return NetHostCarrierNameKey.String(val) -} - -// NetHostCarrierMcc returns an attribute KeyValue conforming to the -// "net.host.carrier.mcc" semantic conventions. It represents the mobile -// carrier country code. -func NetHostCarrierMcc(val string) attribute.KeyValue { - return NetHostCarrierMccKey.String(val) -} - -// NetHostCarrierMnc returns an attribute KeyValue conforming to the -// "net.host.carrier.mnc" semantic conventions. It represents the mobile -// carrier network code. -func NetHostCarrierMnc(val string) attribute.KeyValue { - return NetHostCarrierMncKey.String(val) -} - -// NetHostCarrierIcc returns an attribute KeyValue conforming to the -// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 -// alpha-2 2-character country code associated with the mobile carrier network. -func NetHostCarrierIcc(val string) attribute.KeyValue { - return NetHostCarrierIccKey.String(val) -} - -// Semantic conventions for HTTP client and server Spans. -const ( - // HTTPRequestContentLengthKey is the attribute Key conforming to the - // "http.request_content_length" semantic conventions. It represents the - // size of the request payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. For requests using transport encoding, this should be the - // compressed size. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 3495 - HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") - - // HTTPResponseContentLengthKey is the attribute Key conforming to the - // "http.response_content_length" semantic conventions. It represents the - // size of the response payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. For requests using transport encoding, this should be the - // compressed size. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 3495 - HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") -) - -// HTTPRequestContentLength returns an attribute KeyValue conforming to the -// "http.request_content_length" semantic conventions. It represents the size -// of the request payload body in bytes. This is the number of bytes -// transferred excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPRequestContentLength(val int) attribute.KeyValue { - return HTTPRequestContentLengthKey.Int(val) -} - -// HTTPResponseContentLength returns an attribute KeyValue conforming to the -// "http.response_content_length" semantic conventions. It represents the size -// of the response payload body in bytes. This is the number of bytes -// transferred excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPResponseContentLength(val int) attribute.KeyValue { - return HTTPResponseContentLengthKey.Int(val) -} - -// Semantic convention describing per-message attributes populated on messaging -// spans or links. -const ( - // MessagingMessageIDKey is the attribute Key conforming to the - // "messaging.message.id" semantic conventions. It represents a value used - // by the messaging system as an identifier for the message, represented as - // a string. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '452a7c7c7c7048c2f887f61572b18fc2' - MessagingMessageIDKey = attribute.Key("messaging.message.id") - - // MessagingMessageConversationIDKey is the attribute Key conforming to the - // "messaging.message.conversation_id" semantic conventions. It represents - // the [conversation ID](#conversations) identifying the conversation to - // which the message belongs, represented as a string. Sometimes called - // "Correlation ID". - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'MyConversationID' - MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") - - // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to - // the "messaging.message.payload_size_bytes" semantic conventions. It - // represents the (uncompressed) size of the message payload in bytes. Also - // use this attribute if it is unknown whether the compressed or - // uncompressed payload size is reported. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 2738 - MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") - - // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key - // conforming to the "messaging.message.payload_compressed_size_bytes" - // semantic conventions. It represents the compressed size of the message - // payload in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 2048 - MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") -) - -// MessagingMessageID returns an attribute KeyValue conforming to the -// "messaging.message.id" semantic conventions. 
It represents a value used by -// the messaging system as an identifier for the message, represented as a -// string. -func MessagingMessageID(val string) attribute.KeyValue { - return MessagingMessageIDKey.String(val) -} - -// MessagingMessageConversationID returns an attribute KeyValue conforming -// to the "messaging.message.conversation_id" semantic conventions. It -// represents the [conversation ID](#conversations) identifying the -// conversation to which the message belongs, represented as a string. -// Sometimes called "Correlation ID". -func MessagingMessageConversationID(val string) attribute.KeyValue { - return MessagingMessageConversationIDKey.String(val) -} - -// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming -// to the "messaging.message.payload_size_bytes" semantic conventions. It -// represents the (uncompressed) size of the message payload in bytes. Also use -// this attribute if it is unknown whether the compressed or uncompressed -// payload size is reported. -func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { - return MessagingMessagePayloadSizeBytesKey.Int(val) -} - -// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue -// conforming to the "messaging.message.payload_compressed_size_bytes" semantic -// conventions. It represents the compressed size of the message payload in -// bytes. -func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { - return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) -} - -// Semantic convention for attributes that describe messaging destination on -// broker -const ( - // MessagingDestinationNameKey is the attribute Key conforming to the - // "messaging.destination.name" semantic conventions. It represents the - // message destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'MyQueue', 'MyTopic' - // Note: Destination name SHOULD uniquely identify a specific queue, topic - // or other entity within the broker. If - // the broker does not have such notion, the destination name SHOULD - // uniquely identify the broker. - MessagingDestinationNameKey = attribute.Key("messaging.destination.name") - - // MessagingDestinationTemplateKey is the attribute Key conforming to the - // "messaging.destination.template" semantic conventions. It represents the - // low cardinality representation of the messaging destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/customers/{customerID}' - // Note: Destination names could be constructed from templates. An example - // would be a destination name involving a user name or product id. - // Although the destination name in this case is of high cardinality, the - // underlying template is of low cardinality and can be effectively used - // for grouping and aggregation. - MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") - - // MessagingDestinationTemporaryKey is the attribute Key conforming to the - // "messaging.destination.temporary" semantic conventions. It represents a - // boolean that is true if the message destination is temporary and might - // not exist anymore after messages are processed. 
- // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") - - // MessagingDestinationAnonymousKey is the attribute Key conforming to the - // "messaging.destination.anonymous" semantic conventions. It represents a - // boolean that is true if the message destination is anonymous (could be - // unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") -) - -// MessagingDestinationName returns an attribute KeyValue conforming to the -// "messaging.destination.name" semantic conventions. It represents the message -// destination name -func MessagingDestinationName(val string) attribute.KeyValue { - return MessagingDestinationNameKey.String(val) -} - -// MessagingDestinationTemplate returns an attribute KeyValue conforming to -// the "messaging.destination.template" semantic conventions. It represents the -// low cardinality representation of the messaging destination name -func MessagingDestinationTemplate(val string) attribute.KeyValue { - return MessagingDestinationTemplateKey.String(val) -} - -// MessagingDestinationTemporary returns an attribute KeyValue conforming to -// the "messaging.destination.temporary" semantic conventions. It represents a -// boolean that is true if the message destination is temporary and might not -// exist anymore after messages are processed. -func MessagingDestinationTemporary(val bool) attribute.KeyValue { - return MessagingDestinationTemporaryKey.Bool(val) -} - -// MessagingDestinationAnonymous returns an attribute KeyValue conforming to -// the "messaging.destination.anonymous" semantic conventions. It represents a -// boolean that is true if the message destination is anonymous (could be -// unnamed or have auto-generated name). -func MessagingDestinationAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationAnonymousKey.Bool(val) -} - -// Semantic convention for attributes that describe messaging source on broker -const ( - // MessagingSourceNameKey is the attribute Key conforming to the - // "messaging.source.name" semantic conventions. It represents the message - // source name - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'MyQueue', 'MyTopic' - // Note: Source name SHOULD uniquely identify a specific queue, topic, or - // other entity within the broker. If - // the broker does not have such notion, the source name SHOULD uniquely - // identify the broker. - MessagingSourceNameKey = attribute.Key("messaging.source.name") - - // MessagingSourceTemplateKey is the attribute Key conforming to the - // "messaging.source.template" semantic conventions. It represents the low - // cardinality representation of the messaging source name - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/customers/{customerID}' - // Note: Source names could be constructed from templates. An example would - // be a source name involving a user name or product id. Although the - // source name in this case is of high cardinality, the underlying template - // is of low cardinality and can be effectively used for grouping and - // aggregation. 
- MessagingSourceTemplateKey = attribute.Key("messaging.source.template") - - // MessagingSourceTemporaryKey is the attribute Key conforming to the - // "messaging.source.temporary" semantic conventions. It represents a - // boolean that is true if the message source is temporary and might not - // exist anymore after messages are processed. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") - - // MessagingSourceAnonymousKey is the attribute Key conforming to the - // "messaging.source.anonymous" semantic conventions. It represents a - // boolean that is true if the message source is anonymous (could be - // unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") -) - -// MessagingSourceName returns an attribute KeyValue conforming to the -// "messaging.source.name" semantic conventions. It represents the message -// source name -func MessagingSourceName(val string) attribute.KeyValue { - return MessagingSourceNameKey.String(val) -} - -// MessagingSourceTemplate returns an attribute KeyValue conforming to the -// "messaging.source.template" semantic conventions. It represents the low -// cardinality representation of the messaging source name -func MessagingSourceTemplate(val string) attribute.KeyValue { - return MessagingSourceTemplateKey.String(val) -} - -// MessagingSourceTemporary returns an attribute KeyValue conforming to the -// "messaging.source.temporary" semantic conventions. It represents a boolean -// that is true if the message source is temporary and might not exist anymore -// after messages are processed. -func MessagingSourceTemporary(val bool) attribute.KeyValue { - return MessagingSourceTemporaryKey.Bool(val) -} - -// MessagingSourceAnonymous returns an attribute KeyValue conforming to the -// "messaging.source.anonymous" semantic conventions. It represents a boolean -// that is true if the message source is anonymous (could be unnamed or have -// auto-generated name). -func MessagingSourceAnonymous(val bool) attribute.KeyValue { - return MessagingSourceAnonymousKey.Bool(val) -} - -// Attributes for RabbitMQ -const ( - // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key - // conforming to the "messaging.rabbitmq.destination.routing_key" semantic - // conventions. It represents the rabbitMQ message routing key. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If not empty.) - // Stability: stable - // Examples: 'myKey' - MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") -) - -// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue -// conforming to the "messaging.rabbitmq.destination.routing_key" semantic -// conventions. It represents the rabbitMQ message routing key. -func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { - return MessagingRabbitmqDestinationRoutingKeyKey.String(val) -} - -// Attributes for Apache Kafka -const ( - // MessagingKafkaMessageKeyKey is the attribute Key conforming to the - // "messaging.kafka.message.key" semantic conventions. It represents the - // message keys in Kafka are used for grouping alike messages to ensure - // they're processed on the same partition. They differ from - // `messaging.message.id` in that they're not unique. 
If the key is `null`, - // the attribute MUST NOT be set. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'myKey' - // Note: If the key type is not string, it's string representation has to - // be supplied for the attribute. If the key has no unambiguous, canonical - // string form, don't include its value. - MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") - - // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the - // "messaging.kafka.consumer.group" semantic conventions. It represents the - // name of the Kafka Consumer Group that is handling the message. Only - // applies to consumers, not producers. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'my-group' - MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") - - // MessagingKafkaClientIDKey is the attribute Key conforming to the - // "messaging.kafka.client_id" semantic conventions. It represents the - // client ID for the Consumer or Producer that is handling the message. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'client-5' - MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") - - // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to - // the "messaging.kafka.destination.partition" semantic conventions. It - // represents the partition the message is sent to. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 2 - MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") - - // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the - // "messaging.kafka.source.partition" semantic conventions. It represents - // the partition the message is received from. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 2 - MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition") - - // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the - // "messaging.kafka.message.offset" semantic conventions. It represents the - // offset of a record in the corresponding Kafka partition. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 42 - MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") - - // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the - // "messaging.kafka.message.tombstone" semantic conventions. It represents - // a boolean that is true if the message is a tombstone. - // - // Type: boolean - // RequirementLevel: ConditionallyRequired (If value is `true`. When - // missing, the value is assumed to be `false`.) - // Stability: stable - MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") -) - -// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the -// "messaging.kafka.message.key" semantic conventions. It represents the -// message keys in Kafka are used for grouping alike messages to ensure they're -// processed on the same partition. They differ from `messaging.message.id` in -// that they're not unique. If the key is `null`, the attribute MUST NOT be -// set. 
-func MessagingKafkaMessageKey(val string) attribute.KeyValue { - return MessagingKafkaMessageKeyKey.String(val) -} - -// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to -// the "messaging.kafka.consumer.group" semantic conventions. It represents the -// name of the Kafka Consumer Group that is handling the message. Only applies -// to consumers, not producers. -func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { - return MessagingKafkaConsumerGroupKey.String(val) -} - -// MessagingKafkaClientID returns an attribute KeyValue conforming to the -// "messaging.kafka.client_id" semantic conventions. It represents the client -// ID for the Consumer or Producer that is handling the message. -func MessagingKafkaClientID(val string) attribute.KeyValue { - return MessagingKafkaClientIDKey.String(val) -} - -// MessagingKafkaDestinationPartition returns an attribute KeyValue -// conforming to the "messaging.kafka.destination.partition" semantic -// conventions. It represents the partition the message is sent to. -func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { - return MessagingKafkaDestinationPartitionKey.Int(val) -} - -// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to -// the "messaging.kafka.source.partition" semantic conventions. It represents -// the partition the message is received from. -func MessagingKafkaSourcePartition(val int) attribute.KeyValue { - return MessagingKafkaSourcePartitionKey.Int(val) -} - -// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to -// the "messaging.kafka.message.offset" semantic conventions. It represents the -// offset of a record in the corresponding Kafka partition. -func MessagingKafkaMessageOffset(val int) attribute.KeyValue { - return MessagingKafkaMessageOffsetKey.Int(val) -} - -// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming -// to the "messaging.kafka.message.tombstone" semantic conventions. It -// represents a boolean that is true if the message is a tombstone. -func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { - return MessagingKafkaMessageTombstoneKey.Bool(val) -} - -// Attributes for Apache RocketMQ -const ( - // MessagingRocketmqNamespaceKey is the attribute Key conforming to the - // "messaging.rocketmq.namespace" semantic conventions. It represents the - // namespace of RocketMQ resources, resources in different namespaces are - // individual. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'myNamespace' - MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") - - // MessagingRocketmqClientGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.client_group" semantic conventions. It represents - // the name of the RocketMQ producer/consumer group that is handling the - // message. The client type is identified by the SpanKind. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'myConsumerGroup' - MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") - - // MessagingRocketmqClientIDKey is the attribute Key conforming to the - // "messaging.rocketmq.client_id" semantic conventions. It represents the - // unique identifier for each client. 
- // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'myhost@8742@s8083jm' - MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") - - // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delivery_timestamp" - // semantic conventions. It represents the timestamp in milliseconds that - // the delay message is expected to be delivered to consumer. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If the message type is delay - // and delay time level is not specified.) - // Stability: stable - // Examples: 1665987217045 - MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") - - // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delay_time_level" semantic - // conventions. It represents the delay time level for delay message, which - // determines the message delay time. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If the message type is delay - // and delivery timestamp is not specified.) - // Stability: stable - // Examples: 3 - MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") - - // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.message.group" semantic conventions. It represents - // the it is essential for FIFO message. Messages that belong to the same - // message group are always processed one by one within the same consumer - // group. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) - // Stability: stable - // Examples: 'myMessageGroup' - MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") - - // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the - // "messaging.rocketmq.message.type" semantic conventions. It represents - // the type of message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") - - // MessagingRocketmqMessageTagKey is the attribute Key conforming to the - // "messaging.rocketmq.message.tag" semantic conventions. It represents the - // secondary classifier of message besides topic. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'tagA' - MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") - - // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the - // "messaging.rocketmq.message.keys" semantic conventions. It represents - // the key(s) of message, another way to mark message besides message id. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: 'keyA', 'keyB' - MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") - - // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to - // the "messaging.rocketmq.consumption_model" semantic conventions. It - // represents the model of message consumption. This only applies to - // consumer spans. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") -) - -var ( - // Normal message - MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") - // FIFO message - MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") - // Delay message - MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") - // Transaction message - MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") -) - -var ( - // Clustering consumption model - MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") - // Broadcasting consumption model - MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") -) - -// MessagingRocketmqNamespace returns an attribute KeyValue conforming to -// the "messaging.rocketmq.namespace" semantic conventions. It represents the -// namespace of RocketMQ resources, resources in different namespaces are -// individual. -func MessagingRocketmqNamespace(val string) attribute.KeyValue { - return MessagingRocketmqNamespaceKey.String(val) -} - -// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.client_group" semantic conventions. It represents -// the name of the RocketMQ producer/consumer group that is handling the -// message. The client type is identified by the SpanKind. -func MessagingRocketmqClientGroup(val string) attribute.KeyValue { - return MessagingRocketmqClientGroupKey.String(val) -} - -// MessagingRocketmqClientID returns an attribute KeyValue conforming to the -// "messaging.rocketmq.client_id" semantic conventions. It represents the -// unique identifier for each client. -func MessagingRocketmqClientID(val string) attribute.KeyValue { - return MessagingRocketmqClientIDKey.String(val) -} - -// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic -// conventions. It represents the timestamp in milliseconds that the delay -// message is expected to be delivered to consumer. -func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { - return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) -} - -// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delay_time_level" semantic -// conventions. It represents the delay time level for delay message, which -// determines the message delay time. -func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { - return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) -} - -// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.group" semantic conventions. It represents -// the it is essential for FIFO message. Messages that belong to the same -// message group are always processed one by one within the same consumer -// group. -func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { - return MessagingRocketmqMessageGroupKey.String(val) -} - -// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.tag" semantic conventions. It represents the -// secondary classifier of message besides topic. 
-func MessagingRocketmqMessageTag(val string) attribute.KeyValue { - return MessagingRocketmqMessageTagKey.String(val) -} - -// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.keys" semantic conventions. It represents -// the key(s) of message, another way to mark message besides message id. -func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { - return MessagingRocketmqMessageKeysKey.StringSlice(val) -} - -// Describes user-agent attributes. -const ( - // UserAgentOriginalKey is the attribute Key conforming to the - // "user_agent.original" semantic conventions. It represents the value of - // the [HTTP - // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) - // header sent by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' - UserAgentOriginalKey = attribute.Key("user_agent.original") -) - -// UserAgentOriginal returns an attribute KeyValue conforming to the -// "user_agent.original" semantic conventions. It represents the value of the -// [HTTP -// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) -// header sent by the client. -func UserAgentOriginal(val string) attribute.KeyValue { - return UserAgentOriginalKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go deleted file mode 100644 index 0d1f55a8fe9b1..0000000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package semconv implements OpenTelemetry semantic conventions. -// -// OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the conventions -// as of the v1.20.0 version of the OpenTelemetry specification. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go deleted file mode 100644 index 63776393217c7..0000000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" - -import "go.opentelemetry.io/otel/attribute" - -// This semantic convention defines the attributes used to represent a feature -// flag evaluation as an event. -const ( - // FeatureFlagKeyKey is the attribute Key conforming to the - // "feature_flag.key" semantic conventions. It represents the unique - // identifier of the feature flag. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'logo-color' - FeatureFlagKeyKey = attribute.Key("feature_flag.key") - - // FeatureFlagProviderNameKey is the attribute Key conforming to the - // "feature_flag.provider_name" semantic conventions. It represents the - // name of the service provider that performs the flag evaluation. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'Flag Manager' - FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") - - // FeatureFlagVariantKey is the attribute Key conforming to the - // "feature_flag.variant" semantic conventions. It represents the sHOULD be - // a semantic identifier for a value. If one is unavailable, a stringified - // version of the value can be used. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'red', 'true', 'on' - // Note: A semantic identifier, commonly referred to as a variant, provides - // a means - // for referring to a value without including the value itself. This can - // provide additional context for understanding the meaning behind a value. - // For example, the variant `red` maybe be used for the value `#c05543`. - // - // A stringified version of the value can be used in situations where a - // semantic identifier is unavailable. String representation of the value - // should be determined by the implementer. - FeatureFlagVariantKey = attribute.Key("feature_flag.variant") -) - -// FeatureFlagKey returns an attribute KeyValue conforming to the -// "feature_flag.key" semantic conventions. It represents the unique identifier -// of the feature flag. -func FeatureFlagKey(val string) attribute.KeyValue { - return FeatureFlagKeyKey.String(val) -} - -// FeatureFlagProviderName returns an attribute KeyValue conforming to the -// "feature_flag.provider_name" semantic conventions. It represents the name of -// the service provider that performs the flag evaluation. -func FeatureFlagProviderName(val string) attribute.KeyValue { - return FeatureFlagProviderNameKey.String(val) -} - -// FeatureFlagVariant returns an attribute KeyValue conforming to the -// "feature_flag.variant" semantic conventions. It represents the sHOULD be a -// semantic identifier for a value. If one is unavailable, a stringified -// version of the value can be used. -func FeatureFlagVariant(val string) attribute.KeyValue { - return FeatureFlagVariantKey.String(val) -} - -// RPC received/sent message. -const ( - // MessageTypeKey is the attribute Key conforming to the "message.type" - // semantic conventions. It represents the whether this is a received or - // sent message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - MessageTypeKey = attribute.Key("message.type") - - // MessageIDKey is the attribute Key conforming to the "message.id" - // semantic conventions. It represents the mUST be calculated as two - // different counters starting from `1` one for sent messages and one for - // received message. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Note: This way we guarantee that the values will be consistent between - // different implementations. - MessageIDKey = attribute.Key("message.id") - - // MessageCompressedSizeKey is the attribute Key conforming to the - // "message.compressed_size" semantic conventions. It represents the - // compressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - MessageCompressedSizeKey = attribute.Key("message.compressed_size") - - // MessageUncompressedSizeKey is the attribute Key conforming to the - // "message.uncompressed_size" semantic conventions. It represents the - // uncompressed size of the message in bytes. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: stable - MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") -) - -var ( - // sent - MessageTypeSent = MessageTypeKey.String("SENT") - // received - MessageTypeReceived = MessageTypeKey.String("RECEIVED") -) - -// MessageID returns an attribute KeyValue conforming to the "message.id" -// semantic conventions. It represents the mUST be calculated as two different -// counters starting from `1` one for sent messages and one for received -// message. -func MessageID(val int) attribute.KeyValue { - return MessageIDKey.Int(val) -} - -// MessageCompressedSize returns an attribute KeyValue conforming to the -// "message.compressed_size" semantic conventions. It represents the compressed -// size of the message in bytes. -func MessageCompressedSize(val int) attribute.KeyValue { - return MessageCompressedSizeKey.Int(val) -} - -// MessageUncompressedSize returns an attribute KeyValue conforming to the -// "message.uncompressed_size" semantic conventions. It represents the -// uncompressed size of the message in bytes. -func MessageUncompressedSize(val int) attribute.KeyValue { - return MessageUncompressedSizeKey.Int(val) -} - -// The attributes used to report a single exception associated with a span. -const ( - // ExceptionEscapedKey is the attribute Key conforming to the - // "exception.escaped" semantic conventions. It represents the sHOULD be - // set to true if the exception event is recorded at a point where it is - // known that the exception is escaping the scope of the span. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Note: An exception is considered to have escaped (or left) the scope of - // a span, - // if that span is ended while the exception is still logically "in - // flight". - // This may be actually "in flight" in some languages (e.g. if the - // exception - // is passed to a Context manager's `__exit__` method in Python) but will - // usually be caught at the point of recording the exception in most - // languages. - // - // It is usually not possible to determine at the point where an exception - // is thrown - // whether it will escape the scope of a span. - // However, it is trivial to know that an exception - // will escape, if one checks for an active exception just before ending - // the span, - // as done in the [example above](#recording-an-exception). - // - // It follows that an exception may still escape the scope of the span - // even if the `exception.escaped` attribute was not set or set to false, - // since the event might have been recorded at a time where it was not - // clear whether the exception will escape. - ExceptionEscapedKey = attribute.Key("exception.escaped") -) - -// ExceptionEscaped returns an attribute KeyValue conforming to the -// "exception.escaped" semantic conventions. It represents the sHOULD be set to -// true if the exception event is recorded at a point where it is known that -// the exception is escaping the scope of the span. 
-func ExceptionEscaped(val bool) attribute.KeyValue { - return ExceptionEscapedKey.Bool(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go deleted file mode 100644 index f40c97825aa2b..0000000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" - -const ( - // ExceptionEventName is the name of the Span event representing an exception. - ExceptionEventName = "exception" -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go deleted file mode 100644 index 9c1840631b663..0000000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" - -// HTTP scheme attributes. -var ( - HTTPSchemeHTTP = HTTPSchemeKey.String("http") - HTTPSchemeHTTPS = HTTPSchemeKey.String("https") -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go deleted file mode 100644 index 3d44dae2750b9..0000000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go +++ /dev/null @@ -1,2060 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" - -import "go.opentelemetry.io/otel/attribute" - -// The web browser in which the application represented by the resource is -// running. The `browser.*` attributes MUST be used only for resources that -// represent applications running in a web browser (regardless of whether -// running on a mobile or desktop device). -const ( - // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" - // semantic conventions. It represents the array of brand name and version - // separated by a space - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.brands`). - BrowserBrandsKey = attribute.Key("browser.brands") - - // BrowserPlatformKey is the attribute Key conforming to the - // "browser.platform" semantic conventions. It represents the platform on - // which the browser is running - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Windows', 'macOS', 'Android' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.platform`). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute - // SHOULD be left unset in order for the values to be consistent. - // The list of possible values is defined in the [W3C User-Agent Client - // Hints - // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). 
- // Note that some (but not all) of these values can overlap with values in - // the [`os.type` and `os.name` attributes](./os.md). However, for - // consistency, the values in the `browser.platform` attribute should - // capture the exact value that the user agent provides. - BrowserPlatformKey = attribute.Key("browser.platform") - - // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" - // semantic conventions. It represents a boolean that is true if the - // browser is running on a mobile device - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.mobile`). If unavailable, this attribute - // SHOULD be left unset. - BrowserMobileKey = attribute.Key("browser.mobile") - - // BrowserLanguageKey is the attribute Key conforming to the - // "browser.language" semantic conventions. It represents the preferred - // language of the user using the browser - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'en', 'en-US', 'fr', 'fr-FR' - // Note: This value is intended to be taken from the Navigator API - // `navigator.language`. - BrowserLanguageKey = attribute.Key("browser.language") -) - -// BrowserBrands returns an attribute KeyValue conforming to the -// "browser.brands" semantic conventions. It represents the array of brand name -// and version separated by a space -func BrowserBrands(val ...string) attribute.KeyValue { - return BrowserBrandsKey.StringSlice(val) -} - -// BrowserPlatform returns an attribute KeyValue conforming to the -// "browser.platform" semantic conventions. It represents the platform on which -// the browser is running -func BrowserPlatform(val string) attribute.KeyValue { - return BrowserPlatformKey.String(val) -} - -// BrowserMobile returns an attribute KeyValue conforming to the -// "browser.mobile" semantic conventions. It represents a boolean that is true -// if the browser is running on a mobile device -func BrowserMobile(val bool) attribute.KeyValue { - return BrowserMobileKey.Bool(val) -} - -// BrowserLanguage returns an attribute KeyValue conforming to the -// "browser.language" semantic conventions. It represents the preferred -// language of the user using the browser -func BrowserLanguage(val string) attribute.KeyValue { - return BrowserLanguageKey.String(val) -} - -// A cloud environment (e.g. GCP, Azure, AWS) -const ( - // CloudProviderKey is the attribute Key conforming to the "cloud.provider" - // semantic conventions. It represents the name of the cloud provider. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - CloudProviderKey = attribute.Key("cloud.provider") - - // CloudAccountIDKey is the attribute Key conforming to the - // "cloud.account.id" semantic conventions. It represents the cloud account - // ID the resource is assigned to. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '111111111111', 'opentelemetry' - CloudAccountIDKey = attribute.Key("cloud.account.id") - - // CloudRegionKey is the attribute Key conforming to the "cloud.region" - // semantic conventions. It represents the geographical region the resource - // is running. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'us-central1', 'us-east-1' - // Note: Refer to your provider's docs to see the available regions, for - // example [Alibaba Cloud - // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS - // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), - // [Azure - // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), - // [Google Cloud regions](https://cloud.google.com/about/locations), or - // [Tencent Cloud - // regions](https://www.tencentcloud.com/document/product/213/6091). - CloudRegionKey = attribute.Key("cloud.region") - - // CloudResourceIDKey is the attribute Key conforming to the - // "cloud.resource_id" semantic conventions. It represents the cloud - // provider-specific native identifier of the monitored cloud resource - // (e.g. an - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // on AWS, a [fully qualified resource - // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) - // on Azure, a [full resource - // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) - // on GCP) - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', - // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', - // '/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>' - // Note: On some cloud providers, it may not be possible to determine the - // full ID at startup, - // so it may be necessary to set `cloud.resource_id` as a span attribute - // instead. - // - // The exact value to use for `cloud.resource_id` depends on the cloud - // provider. - // The following well-known definitions MUST be used if you set this - // attribute and they apply: - // - // * **AWS Lambda:** The function - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). - // Take care not to use the "invoked ARN" directly but replace any - // [alias - // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) - // with the resolved function version, as the same runtime instance may - // be invokable with - // multiple different aliases. - // * **GCP:** The [URI of the - // resource](https://cloud.google.com/iam/docs/full-resource-names) - // * **Azure:** The [Fully Qualified Resource - // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) - // of the invoked function, - // *not* the function app, having the form - // `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`. - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider. - CloudResourceIDKey = attribute.Key("cloud.resource_id") - - // CloudAvailabilityZoneKey is the attribute Key conforming to the - // "cloud.availability_zone" semantic conventions. It represents the zone - // where the resource is running. Cloud regions often have multiple, - // isolated locations known as zones to increase availability. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Alibaba Cloud and Google - // Cloud.
- CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - - // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" - // semantic conventions. It represents the cloud platform in use. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. - CloudPlatformKey = attribute.Key("cloud.platform") -) - -var ( - // Alibaba Cloud - CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") - // Amazon Web Services - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - CloudProviderGCP = CloudProviderKey.String("gcp") - // Heroku Platform as a Service - CloudProviderHeroku = CloudProviderKey.String("heroku") - // IBM Cloud - CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") - // Tencent Cloud - CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") -) - -var ( - // Alibaba Cloud Elastic Compute Service - CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") - // Alibaba Cloud Function Compute - CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") - // Red Hat OpenShift on Alibaba Cloud - CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") - // AWS Elastic Compute Cloud - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // AWS App Runner - CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") - // Red Hat OpenShift on AWS (ROSA) - CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") - // Azure Virtual Machines - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") - // Azure Container Instances - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") - // Azure Kubernetes Service - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") - // Azure Functions - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") - // Azure App Service - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") - // Azure Red Hat OpenShift - CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") - // Google Cloud Compute Engine (GCE) - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") - // Red Hat OpenShift on Google Cloud - CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") - // Red Hat OpenShift on IBM Cloud - CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") - // Tencent Cloud Cloud Virtual Machine (CVM) - 
CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") - // Tencent Cloud Elastic Kubernetes Service (EKS) - CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") - // Tencent Cloud Serverless Cloud Function (SCF) - CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") -) - -// CloudAccountID returns an attribute KeyValue conforming to the -// "cloud.account.id" semantic conventions. It represents the cloud account ID -// the resource is assigned to. -func CloudAccountID(val string) attribute.KeyValue { - return CloudAccountIDKey.String(val) -} - -// CloudRegion returns an attribute KeyValue conforming to the -// "cloud.region" semantic conventions. It represents the geographical region -// the resource is running. -func CloudRegion(val string) attribute.KeyValue { - return CloudRegionKey.String(val) -} - -// CloudResourceID returns an attribute KeyValue conforming to the -// "cloud.resource_id" semantic conventions. It represents the cloud -// provider-specific native identifier of the monitored cloud resource (e.g. an -// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) -// on AWS, a [fully qualified resource -// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) -// on Azure, a [full resource -// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) -// on GCP) -func CloudResourceID(val string) attribute.KeyValue { - return CloudResourceIDKey.String(val) -} - -// CloudAvailabilityZone returns an attribute KeyValue conforming to the -// "cloud.availability_zone" semantic conventions. It represents the zone where -// the resource is running. Cloud regions often have multiple, isolated -// locations known as zones to increase availability. -func CloudAvailabilityZone(val string) attribute.KeyValue { - return CloudAvailabilityZoneKey.String(val) -} - -// Resources used by AWS Elastic Container Service (ECS). -const ( - // AWSECSContainerARNKey is the attribute Key conforming to the - // "aws.ecs.container.arn" semantic conventions. It represents the Amazon - // Resource Name (ARN) of an [ECS container - // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - - // AWSECSClusterARNKey is the attribute Key conforming to the - // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an - // [ECS - // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - - // AWSECSLaunchtypeKey is the attribute Key conforming to the - // "aws.ecs.launchtype" semantic conventions. It represents the [launch - // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) - // for an ECS task. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - - // AWSECSTaskARNKey is the attribute Key conforming to the - // "aws.ecs.task.arn" semantic conventions.
It represents the ARN of an - // [ECS task - // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - - // AWSECSTaskFamilyKey is the attribute Key conforming to the - // "aws.ecs.task.family" semantic conventions. It represents the task - // definition family this task definition is a member of. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-family' - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - - // AWSECSTaskRevisionKey is the attribute Key conforming to the - // "aws.ecs.task.revision" semantic conventions. It represents the revision - // for this task definition. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '8', '26' - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") -) - -var ( - // ec2 - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// AWSECSContainerARN returns an attribute KeyValue conforming to the -// "aws.ecs.container.arn" semantic conventions. It represents the Amazon -// Resource Name (ARN) of an [ECS container -// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). -func AWSECSContainerARN(val string) attribute.KeyValue { - return AWSECSContainerARNKey.String(val) -} - -// AWSECSClusterARN returns an attribute KeyValue conforming to the -// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS -// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). -func AWSECSClusterARN(val string) attribute.KeyValue { - return AWSECSClusterARNKey.String(val) -} - -// AWSECSTaskARN returns an attribute KeyValue conforming to the -// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS -// task -// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). -func AWSECSTaskARN(val string) attribute.KeyValue { - return AWSECSTaskARNKey.String(val) -} - -// AWSECSTaskFamily returns an attribute KeyValue conforming to the -// "aws.ecs.task.family" semantic conventions. It represents the task -// definition family this task definition is a member of. -func AWSECSTaskFamily(val string) attribute.KeyValue { - return AWSECSTaskFamilyKey.String(val) -} - -// AWSECSTaskRevision returns an attribute KeyValue conforming to the -// "aws.ecs.task.revision" semantic conventions. It represents the revision for -// this task definition. -func AWSECSTaskRevision(val string) attribute.KeyValue { - return AWSECSTaskRevisionKey.String(val) -} - -// Resources used by AWS Elastic Kubernetes Service (EKS). -const ( - // AWSEKSClusterARNKey is the attribute Key conforming to the - // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an - // EKS cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") -) - -// AWSEKSClusterARN returns an attribute KeyValue conforming to the -// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS -// cluster. 
-func AWSEKSClusterARN(val string) attribute.KeyValue { - return AWSEKSClusterARNKey.String(val) -} - -// Resources specific to Amazon Web Services. -const ( - // AWSLogGroupNamesKey is the attribute Key conforming to the - // "aws.log.group.names" semantic conventions. It represents the name(s) of - // the AWS log group(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like - // multi-container applications, where a single application has sidecar - // containers, and each write to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - - // AWSLogGroupARNsKey is the attribute Key conforming to the - // "aws.log.group.arns" semantic conventions. It represents the Amazon - // Resource Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' - // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - - // AWSLogStreamNamesKey is the attribute Key conforming to the - // "aws.log.stream.names" semantic conventions. It represents the name(s) - // of the AWS log stream(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") - - // AWSLogStreamARNsKey is the attribute Key conforming to the - // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of - // the AWS log stream(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - // One log group can contain several log streams, so these ARNs necessarily - // identify both a log group and a log stream. - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") -) - -// AWSLogGroupNames returns an attribute KeyValue conforming to the -// "aws.log.group.names" semantic conventions. It represents the name(s) of the -// AWS log group(s) an application is writing to. -func AWSLogGroupNames(val ...string) attribute.KeyValue { - return AWSLogGroupNamesKey.StringSlice(val) -} - -// AWSLogGroupARNs returns an attribute KeyValue conforming to the -// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource -// Name(s) (ARN) of the AWS log group(s). -func AWSLogGroupARNs(val ...string) attribute.KeyValue { - return AWSLogGroupARNsKey.StringSlice(val) -} - -// AWSLogStreamNames returns an attribute KeyValue conforming to the -// "aws.log.stream.names" semantic conventions. It represents the name(s) of -// the AWS log stream(s) an application is writing to. -func AWSLogStreamNames(val ...string) attribute.KeyValue { - return AWSLogStreamNamesKey.StringSlice(val) -} - -// AWSLogStreamARNs returns an attribute KeyValue conforming to the -// "aws.log.stream.arns" semantic conventions. 
It represents the ARN(s) of the -// AWS log stream(s). -func AWSLogStreamARNs(val ...string) attribute.KeyValue { - return AWSLogStreamARNsKey.StringSlice(val) -} - -// Heroku dyno metadata -const ( - // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the - // "heroku.release.creation_timestamp" semantic conventions. It represents - // the time and date the release was created - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2022-10-23T18:00:42Z' - HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") - - // HerokuReleaseCommitKey is the attribute Key conforming to the - // "heroku.release.commit" semantic conventions. It represents the commit - // hash for the current release - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' - HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") - - // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" - // semantic conventions. It represents the unique identifier for the - // application - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' - HerokuAppIDKey = attribute.Key("heroku.app.id") -) - -// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming -// to the "heroku.release.creation_timestamp" semantic conventions. It -// represents the time and date the release was created -func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { - return HerokuReleaseCreationTimestampKey.String(val) -} - -// HerokuReleaseCommit returns an attribute KeyValue conforming to the -// "heroku.release.commit" semantic conventions. It represents the commit hash -// for the current release -func HerokuReleaseCommit(val string) attribute.KeyValue { - return HerokuReleaseCommitKey.String(val) -} - -// HerokuAppID returns an attribute KeyValue conforming to the -// "heroku.app.id" semantic conventions. It represents the unique identifier -// for the application -func HerokuAppID(val string) attribute.KeyValue { - return HerokuAppIDKey.String(val) -} - -// A container instance. -const ( - // ContainerNameKey is the attribute Key conforming to the "container.name" - // semantic conventions. It represents the container name used by container - // runtime. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-autoconf' - ContainerNameKey = attribute.Key("container.name") - - // ContainerIDKey is the attribute Key conforming to the "container.id" - // semantic conventions. It represents the container ID. Usually a UUID, as - // for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container-identification). - // The UUID might be abbreviated. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'a3bf90e006b2' - ContainerIDKey = attribute.Key("container.id") - - // ContainerRuntimeKey is the attribute Key conforming to the - // "container.runtime" semantic conventions. It represents the container - // runtime managing this container. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'docker', 'containerd', 'rkt' - ContainerRuntimeKey = attribute.Key("container.runtime") - - // ContainerImageNameKey is the attribute Key conforming to the - // "container.image.name" semantic conventions. 
It represents the name of - // the image the container was built on. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'gcr.io/opentelemetry/operator' - ContainerImageNameKey = attribute.Key("container.image.name") - - // ContainerImageTagKey is the attribute Key conforming to the - // "container.image.tag" semantic conventions. It represents the container - // image tag. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '0.1' - ContainerImageTagKey = attribute.Key("container.image.tag") -) - -// ContainerName returns an attribute KeyValue conforming to the -// "container.name" semantic conventions. It represents the container name used -// by container runtime. -func ContainerName(val string) attribute.KeyValue { - return ContainerNameKey.String(val) -} - -// ContainerID returns an attribute KeyValue conforming to the -// "container.id" semantic conventions. It represents the container ID. Usually -// a UUID, as for example used to [identify Docker -// containers](https://docs.docker.com/engine/reference/run/#container-identification). -// The UUID might be abbreviated. -func ContainerID(val string) attribute.KeyValue { - return ContainerIDKey.String(val) -} - -// ContainerRuntime returns an attribute KeyValue conforming to the -// "container.runtime" semantic conventions. It represents the container -// runtime managing this container. -func ContainerRuntime(val string) attribute.KeyValue { - return ContainerRuntimeKey.String(val) -} - -// ContainerImageName returns an attribute KeyValue conforming to the -// "container.image.name" semantic conventions. It represents the name of the -// image the container was built on. -func ContainerImageName(val string) attribute.KeyValue { - return ContainerImageNameKey.String(val) -} - -// ContainerImageTag returns an attribute KeyValue conforming to the -// "container.image.tag" semantic conventions. It represents the container -// image tag. -func ContainerImageTag(val string) attribute.KeyValue { - return ContainerImageTagKey.String(val) -} - -// The software deployment. -const ( - // DeploymentEnvironmentKey is the attribute Key conforming to the - // "deployment.environment" semantic conventions. It represents the name of - // the [deployment - // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka - // deployment tier). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'staging', 'production' - DeploymentEnvironmentKey = attribute.Key("deployment.environment") -) - -// DeploymentEnvironment returns an attribute KeyValue conforming to the -// "deployment.environment" semantic conventions. It represents the name of the -// [deployment -// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka -// deployment tier). -func DeploymentEnvironment(val string) attribute.KeyValue { - return DeploymentEnvironmentKey.String(val) -} - -// The device on which the process represented by this resource is running. -const ( - // DeviceIDKey is the attribute Key conforming to the "device.id" semantic - // conventions. It represents a unique identifier representing the device - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' - // Note: The device identifier MUST only be defined using the values - // outlined below. This value is not an advertising identifier and MUST NOT - // be used as such. 
On iOS (Swift or Objective-C), this value MUST be equal - // to the [vendor - // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). - // On Android (Java or Kotlin), this value MUST be equal to the Firebase - // Installation ID or a globally unique UUID which is persisted across - // sessions in your application. More information can be found - // [here](https://developer.android.com/training/articles/user-data-ids) on - // best practices and exact implementation details. Caution should be taken - // when storing personal data or anything which can identify a user. GDPR - // and data protection laws may apply, ensure you do your own due - // diligence. - DeviceIDKey = attribute.Key("device.id") - - // DeviceModelIdentifierKey is the attribute Key conforming to the - // "device.model.identifier" semantic conventions. It represents the model - // identifier for the device - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'iPhone3,4', 'SM-G920F' - // Note: It's recommended this value represents a machine readable version - // of the model identifier rather than the market or consumer-friendly name - // of the device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - - // DeviceModelNameKey is the attribute Key conforming to the - // "device.model.name" semantic conventions. It represents the marketing - // name for the device model - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' - // Note: It's recommended this value represents a human readable version of - // the device model rather than a machine readable alternative. - DeviceModelNameKey = attribute.Key("device.model.name") - - // DeviceManufacturerKey is the attribute Key conforming to the - // "device.manufacturer" semantic conventions. It represents the name of - // the device manufacturer - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Apple', 'Samsung' - // Note: The Android OS provides this field via - // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). - // iOS apps SHOULD hardcode the value `Apple`. - DeviceManufacturerKey = attribute.Key("device.manufacturer") -) - -// DeviceID returns an attribute KeyValue conforming to the "device.id" -// semantic conventions. It represents a unique identifier representing the -// device -func DeviceID(val string) attribute.KeyValue { - return DeviceIDKey.String(val) -} - -// DeviceModelIdentifier returns an attribute KeyValue conforming to the -// "device.model.identifier" semantic conventions. It represents the model -// identifier for the device -func DeviceModelIdentifier(val string) attribute.KeyValue { - return DeviceModelIdentifierKey.String(val) -} - -// DeviceModelName returns an attribute KeyValue conforming to the -// "device.model.name" semantic conventions. It represents the marketing name -// for the device model -func DeviceModelName(val string) attribute.KeyValue { - return DeviceModelNameKey.String(val) -} - -// DeviceManufacturer returns an attribute KeyValue conforming to the -// "device.manufacturer" semantic conventions. It represents the name of the -// device manufacturer -func DeviceManufacturer(val string) attribute.KeyValue { - return DeviceManufacturerKey.String(val) -} - -// A serverless instance. -const ( - // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic - // conventions. 
It represents the name of the single function that this - // runtime instance executes. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'my-function', 'myazurefunctionapp/some-function-name' - // Note: This is the name of the function as configured/deployed on the - // FaaS - // platform and is usually different from the name of the callback - // function (which may be stored in the - // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) - // span attributes). - // - // For some cloud providers, the above definition is ambiguous. The - // following - // definition of function name MUST be used for this attribute - // (and consequently the span name) for the listed cloud - // providers/products: - // - // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name - // followed by a forward slash followed by the function name (this form - // can also be seen in the resource JSON for the function). - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider (see also the `cloud.resource_id` attribute). - FaaSNameKey = attribute.Key("faas.name") - - // FaaSVersionKey is the attribute Key conforming to the "faas.version" - // semantic conventions. It represents the immutable version of the - // function being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '26', 'pinkfroid-00002' - // Note: Depending on the cloud provider and platform, use: - // - // * **AWS Lambda:** The [function - // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) - // (an integer represented as a decimal string). - // * **Google Cloud Run:** The - // [revision](https://cloud.google.com/run/docs/managing/revisions) - // (i.e., the function name plus the revision suffix). - // * **Google Cloud Functions:** The value of the - // [`K_REVISION` environment - // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). - // * **Azure Functions:** Not applicable. Do not set this attribute. - FaaSVersionKey = attribute.Key("faas.version") - - // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" - // semantic conventions. It represents the execution environment ID as a - // string, that will be potentially reused for other invocations to the - // same function/function version. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' - // Note: * **AWS Lambda:** Use the (full) log stream name. - FaaSInstanceKey = attribute.Key("faas.instance") - - // FaaSMaxMemoryKey is the attribute Key conforming to the - // "faas.max_memory" semantic conventions. It represents the amount of - // memory available to the serverless function converted to Bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 134217728 - // Note: It's recommended to set this attribute since e.g. too little - // memory can easily stop a Java AWS Lambda function from working - // correctly. On AWS Lambda, the environment variable - // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must - // be multiplied by 1,048,576).
- FaaSMaxMemoryKey = attribute.Key("faas.max_memory") -) - -// FaaSName returns an attribute KeyValue conforming to the "faas.name" -// semantic conventions. It represents the name of the single function that -// this runtime instance executes. -func FaaSName(val string) attribute.KeyValue { - return FaaSNameKey.String(val) -} - -// FaaSVersion returns an attribute KeyValue conforming to the -// "faas.version" semantic conventions. It represents the immutable version of -// the function being executed. -func FaaSVersion(val string) attribute.KeyValue { - return FaaSVersionKey.String(val) -} - -// FaaSInstance returns an attribute KeyValue conforming to the -// "faas.instance" semantic conventions. It represents the execution -// environment ID as a string, that will be potentially reused for other -// invocations to the same function/function version. -func FaaSInstance(val string) attribute.KeyValue { - return FaaSInstanceKey.String(val) -} - -// FaaSMaxMemory returns an attribute KeyValue conforming to the -// "faas.max_memory" semantic conventions. It represents the amount of memory -// available to the serverless function converted to Bytes. -func FaaSMaxMemory(val int) attribute.KeyValue { - return FaaSMaxMemoryKey.Int(val) -} - -// A host is defined as a general computing instance. -const ( - // HostIDKey is the attribute Key conforming to the "host.id" semantic - // conventions. It represents the unique host ID. For Cloud, this must be - // the instance_id assigned by the cloud provider. For non-containerized - // systems, this should be the `machine-id`. See the table below for the - // sources to use to determine the `machine-id` based on operating system. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'fdbf79e8af94cb7f9e8df36789187052' - HostIDKey = attribute.Key("host.id") - - // HostNameKey is the attribute Key conforming to the "host.name" semantic - // conventions. It represents the name of the host. On Unix systems, it may - // contain what the hostname command returns, or the fully qualified - // hostname, or another name specified by the user. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-test' - HostNameKey = attribute.Key("host.name") - - // HostTypeKey is the attribute Key conforming to the "host.type" semantic - // conventions. It represents the type of host. For Cloud, this must be the - // machine type. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'n1-standard-1' - HostTypeKey = attribute.Key("host.type") - - // HostArchKey is the attribute Key conforming to the "host.arch" semantic - // conventions. It represents the CPU architecture the host system is - // running on. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - HostArchKey = attribute.Key("host.arch") - - // HostImageNameKey is the attribute Key conforming to the - // "host.image.name" semantic conventions. It represents the name of the VM - // image or OS install the host was instantiated from. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' - HostImageNameKey = attribute.Key("host.image.name") - - // HostImageIDKey is the attribute Key conforming to the "host.image.id" - // semantic conventions. It represents the VM image ID. For Cloud, this - // value is from the provider.
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'ami-07b06b442921831e5' - HostImageIDKey = attribute.Key("host.image.id") - - // HostImageVersionKey is the attribute Key conforming to the - // "host.image.version" semantic conventions. It represents the version - // string of the VM image as defined in [Version - // Attributes](README.md#version-attributes). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '0.1' - HostImageVersionKey = attribute.Key("host.image.version") -) - -var ( - // AMD64 - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - HostArchPPC64 = HostArchKey.String("ppc64") - // IBM z/Architecture - HostArchS390x = HostArchKey.String("s390x") - // 32-bit x86 - HostArchX86 = HostArchKey.String("x86") -) - -// HostID returns an attribute KeyValue conforming to the "host.id" semantic -// conventions. It represents the unique host ID. For Cloud, this must be the -// instance_id assigned by the cloud provider. For non-containerized systems, -// this should be the `machine-id`. See the table below for the sources to use -// to determine the `machine-id` based on operating system. -func HostID(val string) attribute.KeyValue { - return HostIDKey.String(val) -} - -// HostName returns an attribute KeyValue conforming to the "host.name" -// semantic conventions. It represents the name of the host. On Unix systems, -// it may contain what the hostname command returns, or the fully qualified -// hostname, or another name specified by the user. -func HostName(val string) attribute.KeyValue { - return HostNameKey.String(val) -} - -// HostType returns an attribute KeyValue conforming to the "host.type" -// semantic conventions. It represents the type of host. For Cloud, this must -// be the machine type. -func HostType(val string) attribute.KeyValue { - return HostTypeKey.String(val) -} - -// HostImageName returns an attribute KeyValue conforming to the -// "host.image.name" semantic conventions. It represents the name of the VM -// image or OS install the host was instantiated from. -func HostImageName(val string) attribute.KeyValue { - return HostImageNameKey.String(val) -} - -// HostImageID returns an attribute KeyValue conforming to the -// "host.image.id" semantic conventions. It represents the VM image ID. For -// Cloud, this value is from the provider. -func HostImageID(val string) attribute.KeyValue { - return HostImageIDKey.String(val) -} - -// HostImageVersion returns an attribute KeyValue conforming to the -// "host.image.version" semantic conventions. It represents the version string -// of the VM image as defined in [Version -// Attributes](README.md#version-attributes). -func HostImageVersion(val string) attribute.KeyValue { - return HostImageVersionKey.String(val) -} - -// A Kubernetes Cluster. -const ( - // K8SClusterNameKey is the attribute Key conforming to the - // "k8s.cluster.name" semantic conventions. It represents the name of the - // cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-cluster' - K8SClusterNameKey = attribute.Key("k8s.cluster.name") -) - -// K8SClusterName returns an attribute KeyValue conforming to the -// "k8s.cluster.name" semantic conventions.
It represents the name of the -// cluster. -func K8SClusterName(val string) attribute.KeyValue { - return K8SClusterNameKey.String(val) -} - -// A Kubernetes Node object. -const ( - // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" - // semantic conventions. It represents the name of the Node. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'node-1' - K8SNodeNameKey = attribute.Key("k8s.node.name") - - // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" - // semantic conventions. It represents the UID of the Node. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' - K8SNodeUIDKey = attribute.Key("k8s.node.uid") -) - -// K8SNodeName returns an attribute KeyValue conforming to the -// "k8s.node.name" semantic conventions. It represents the name of the Node. -func K8SNodeName(val string) attribute.KeyValue { - return K8SNodeNameKey.String(val) -} - -// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" -// semantic conventions. It represents the UID of the Node. -func K8SNodeUID(val string) attribute.KeyValue { - return K8SNodeUIDKey.String(val) -} - -// A Kubernetes Namespace. -const ( - // K8SNamespaceNameKey is the attribute Key conforming to the - // "k8s.namespace.name" semantic conventions. It represents the name of the - // namespace that the pod is running in. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'default' - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") -) - -// K8SNamespaceName returns an attribute KeyValue conforming to the -// "k8s.namespace.name" semantic conventions. It represents the name of the -// namespace that the pod is running in. -func K8SNamespaceName(val string) attribute.KeyValue { - return K8SNamespaceNameKey.String(val) -} - -// A Kubernetes Pod object. -const ( - // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" - // semantic conventions. It represents the UID of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - - // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" - // semantic conventions. It represents the name of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-pod-autoconf' - K8SPodNameKey = attribute.Key("k8s.pod.name") -) - -// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" -// semantic conventions. It represents the UID of the Pod. -func K8SPodUID(val string) attribute.KeyValue { - return K8SPodUIDKey.String(val) -} - -// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" -// semantic conventions. It represents the name of the Pod. -func K8SPodName(val string) attribute.KeyValue { - return K8SPodNameKey.String(val) -} - -// A container in a -// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). -const ( - // K8SContainerNameKey is the attribute Key conforming to the - // "k8s.container.name" semantic conventions. It represents the name of the - // Container from Pod specification, must be unique within a Pod. Container - // runtime usually uses different globally unique name (`container.name`). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'redis' - K8SContainerNameKey = attribute.Key("k8s.container.name") - - // K8SContainerRestartCountKey is the attribute Key conforming to the - // "k8s.container.restart_count" semantic conventions. It represents the - // number of times the container was restarted. This attribute can be used - // to identify a particular container (running or stopped) within a - // container spec. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 0, 2 - K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") -) - -// K8SContainerName returns an attribute KeyValue conforming to the -// "k8s.container.name" semantic conventions. It represents the name of the -// Container from Pod specification, must be unique within a Pod. Container -// runtime usually uses different globally unique name (`container.name`). -func K8SContainerName(val string) attribute.KeyValue { - return K8SContainerNameKey.String(val) -} - -// K8SContainerRestartCount returns an attribute KeyValue conforming to the -// "k8s.container.restart_count" semantic conventions. It represents the number -// of times the container was restarted. This attribute can be used to identify -// a particular container (running or stopped) within a container spec. -func K8SContainerRestartCount(val int) attribute.KeyValue { - return K8SContainerRestartCountKey.Int(val) -} - -// A Kubernetes ReplicaSet object. -const ( - // K8SReplicaSetUIDKey is the attribute Key conforming to the - // "k8s.replicaset.uid" semantic conventions. It represents the UID of the - // ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") - - // K8SReplicaSetNameKey is the attribute Key conforming to the - // "k8s.replicaset.name" semantic conventions. It represents the name of - // the ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") -) - -// K8SReplicaSetUID returns an attribute KeyValue conforming to the -// "k8s.replicaset.uid" semantic conventions. It represents the UID of the -// ReplicaSet. -func K8SReplicaSetUID(val string) attribute.KeyValue { - return K8SReplicaSetUIDKey.String(val) -} - -// K8SReplicaSetName returns an attribute KeyValue conforming to the -// "k8s.replicaset.name" semantic conventions. It represents the name of the -// ReplicaSet. -func K8SReplicaSetName(val string) attribute.KeyValue { - return K8SReplicaSetNameKey.String(val) -} - -// A Kubernetes Deployment object. -const ( - // K8SDeploymentUIDKey is the attribute Key conforming to the - // "k8s.deployment.uid" semantic conventions. It represents the UID of the - // Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - - // K8SDeploymentNameKey is the attribute Key conforming to the - // "k8s.deployment.name" semantic conventions. It represents the name of - // the Deployment. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") -) - -// K8SDeploymentUID returns an attribute KeyValue conforming to the -// "k8s.deployment.uid" semantic conventions. It represents the UID of the -// Deployment. -func K8SDeploymentUID(val string) attribute.KeyValue { - return K8SDeploymentUIDKey.String(val) -} - -// K8SDeploymentName returns an attribute KeyValue conforming to the -// "k8s.deployment.name" semantic conventions. It represents the name of the -// Deployment. -func K8SDeploymentName(val string) attribute.KeyValue { - return K8SDeploymentNameKey.String(val) -} - -// A Kubernetes StatefulSet object. -const ( - // K8SStatefulSetUIDKey is the attribute Key conforming to the - // "k8s.statefulset.uid" semantic conventions. It represents the UID of the - // StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") - - // K8SStatefulSetNameKey is the attribute Key conforming to the - // "k8s.statefulset.name" semantic conventions. It represents the name of - // the StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") -) - -// K8SStatefulSetUID returns an attribute KeyValue conforming to the -// "k8s.statefulset.uid" semantic conventions. It represents the UID of the -// StatefulSet. -func K8SStatefulSetUID(val string) attribute.KeyValue { - return K8SStatefulSetUIDKey.String(val) -} - -// K8SStatefulSetName returns an attribute KeyValue conforming to the -// "k8s.statefulset.name" semantic conventions. It represents the name of the -// StatefulSet. -func K8SStatefulSetName(val string) attribute.KeyValue { - return K8SStatefulSetNameKey.String(val) -} - -// A Kubernetes DaemonSet object. -const ( - // K8SDaemonSetUIDKey is the attribute Key conforming to the - // "k8s.daemonset.uid" semantic conventions. It represents the UID of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") - - // K8SDaemonSetNameKey is the attribute Key conforming to the - // "k8s.daemonset.name" semantic conventions. It represents the name of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") -) - -// K8SDaemonSetUID returns an attribute KeyValue conforming to the -// "k8s.daemonset.uid" semantic conventions. It represents the UID of the -// DaemonSet. -func K8SDaemonSetUID(val string) attribute.KeyValue { - return K8SDaemonSetUIDKey.String(val) -} - -// K8SDaemonSetName returns an attribute KeyValue conforming to the -// "k8s.daemonset.name" semantic conventions. It represents the name of the -// DaemonSet. -func K8SDaemonSetName(val string) attribute.KeyValue { - return K8SDaemonSetNameKey.String(val) -} - -// A Kubernetes Job object. -const ( - // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" - // semantic conventions. It represents the UID of the Job. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SJobUIDKey = attribute.Key("k8s.job.uid") - - // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" - // semantic conventions. It represents the name of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SJobNameKey = attribute.Key("k8s.job.name") -) - -// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" -// semantic conventions. It represents the UID of the Job. -func K8SJobUID(val string) attribute.KeyValue { - return K8SJobUIDKey.String(val) -} - -// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" -// semantic conventions. It represents the name of the Job. -func K8SJobName(val string) attribute.KeyValue { - return K8SJobNameKey.String(val) -} - -// A Kubernetes CronJob object. -const ( - // K8SCronJobUIDKey is the attribute Key conforming to the - // "k8s.cronjob.uid" semantic conventions. It represents the UID of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - - // K8SCronJobNameKey is the attribute Key conforming to the - // "k8s.cronjob.name" semantic conventions. It represents the name of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") -) - -// K8SCronJobUID returns an attribute KeyValue conforming to the -// "k8s.cronjob.uid" semantic conventions. It represents the UID of the -// CronJob. -func K8SCronJobUID(val string) attribute.KeyValue { - return K8SCronJobUIDKey.String(val) -} - -// K8SCronJobName returns an attribute KeyValue conforming to the -// "k8s.cronjob.name" semantic conventions. It represents the name of the -// CronJob. -func K8SCronJobName(val string) attribute.KeyValue { - return K8SCronJobNameKey.String(val) -} - -// The operating system (OS) on which the process represented by this resource -// is running. -const ( - // OSTypeKey is the attribute Key conforming to the "os.type" semantic - // conventions. It represents the operating system type. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - OSTypeKey = attribute.Key("os.type") - - // OSDescriptionKey is the attribute Key conforming to the "os.description" - // semantic conventions. It represents the human readable (not intended to - // be parsed) OS version information, like e.g. reported by `ver` or - // `lsb_release -a` commands. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 - // LTS' - OSDescriptionKey = attribute.Key("os.description") - - // OSNameKey is the attribute Key conforming to the "os.name" semantic - // conventions. It represents the human readable operating system name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'iOS', 'Android', 'Ubuntu' - OSNameKey = attribute.Key("os.name") - - // OSVersionKey is the attribute Key conforming to the "os.version" - // semantic conventions. It represents the version string of the operating - // system as defined in [Version - // Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '14.2.1', '18.04.1' - OSVersionKey = attribute.Key("os.version") -) - -var ( - // Microsoft Windows - OSTypeWindows = OSTypeKey.String("windows") - // Linux - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - OSTypeAIX = OSTypeKey.String("aix") - // SunOS, Oracle Solaris - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - OSTypeZOS = OSTypeKey.String("z_os") -) - -// OSDescription returns an attribute KeyValue conforming to the -// "os.description" semantic conventions. It represents the human readable (not -// intended to be parsed) OS version information, like e.g. reported by `ver` -// or `lsb_release -a` commands. -func OSDescription(val string) attribute.KeyValue { - return OSDescriptionKey.String(val) -} - -// OSName returns an attribute KeyValue conforming to the "os.name" semantic -// conventions. It represents the human readable operating system name. -func OSName(val string) attribute.KeyValue { - return OSNameKey.String(val) -} - -// OSVersion returns an attribute KeyValue conforming to the "os.version" -// semantic conventions. It represents the version string of the operating -// system as defined in [Version -// Attributes](../../resource/semantic_conventions/README.md#version-attributes). -func OSVersion(val string) attribute.KeyValue { - return OSVersionKey.String(val) -} - -// An operating system process. -const ( - // ProcessPIDKey is the attribute Key conforming to the "process.pid" - // semantic conventions. It represents the process identifier (PID). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - - // ProcessParentPIDKey is the attribute Key conforming to the - // "process.parent_pid" semantic conventions. It represents the parent - // Process identifier (PID). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 111 - ProcessParentPIDKey = attribute.Key("process.parent_pid") - - // ProcessExecutableNameKey is the attribute Key conforming to the - // "process.executable.name" semantic conventions. It represents the name - // of the process executable. On Linux based systems, can be set to the - // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name - // of `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: ConditionallyRequired (See alternative attributes - // below.) - // Stability: stable - // Examples: 'otelcol' - ProcessExecutableNameKey = attribute.Key("process.executable.name") - - // ProcessExecutablePathKey is the attribute Key conforming to the - // "process.executable.path" semantic conventions. It represents the full - // path to the process executable. On Linux based systems, can be set to - // the target of `proc/[pid]/exe`. On Windows, can be set to the result of - // `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: ConditionallyRequired (See alternative attributes - // below.) 
- // Stability: stable
- // Examples: '/usr/bin/cmd/otelcol'
- ProcessExecutablePathKey = attribute.Key("process.executable.path")
-
- // ProcessCommandKey is the attribute Key conforming to the
- // "process.command" semantic conventions. It represents the command used
- // to launch the process (i.e. the command name). On Linux based systems,
- // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
- // be set to the first parameter extracted from `GetCommandLineW`.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: 'cmd/otelcol'
- ProcessCommandKey = attribute.Key("process.command")
-
- // ProcessCommandLineKey is the attribute Key conforming to the
- // "process.command_line" semantic conventions. It represents the full
- // command used to launch the process as a single string representing the
- // full command. On Windows, can be set to the result of `GetCommandLineW`.
- // Do not set this if you have to assemble it just for monitoring; use
- // `process.command_args` instead.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
- ProcessCommandLineKey = attribute.Key("process.command_line")
-
- // ProcessCommandArgsKey is the attribute Key conforming to the
- // "process.command_args" semantic conventions. It represents all the
- // command arguments (including the command/executable itself) as received
- // by the process. On Linux-based systems (and some other Unixoid systems
- // supporting procfs), can be set according to the list of null-delimited
- // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
- // this would be the full argv vector passed to `main`.
- //
- // Type: string[]
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: 'cmd/otelcol', '--config=config.yaml'
- ProcessCommandArgsKey = attribute.Key("process.command_args")
-
- // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
- // semantic conventions. It represents the username of the user that owns
- // the process.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'root'
- ProcessOwnerKey = attribute.Key("process.owner")
-)
-
-// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
-// semantic conventions. It represents the process identifier (PID).
-func ProcessPID(val int) attribute.KeyValue {
- return ProcessPIDKey.Int(val)
-}
-
-// ProcessParentPID returns an attribute KeyValue conforming to the
-// "process.parent_pid" semantic conventions. It represents the parent Process
-// identifier (PID).
-func ProcessParentPID(val int) attribute.KeyValue {
- return ProcessParentPIDKey.Int(val)
-}
-
-// ProcessExecutableName returns an attribute KeyValue conforming to the
-// "process.executable.name" semantic conventions. It represents the name of
-// the process executable. On Linux based systems, can be set to the `Name` in
-// `proc/[pid]/status`. On Windows, can be set to the base name of
-// `GetProcessImageFileNameW`.
-func ProcessExecutableName(val string) attribute.KeyValue {
- return ProcessExecutableNameKey.String(val)
-}
-
-// ProcessExecutablePath returns an attribute KeyValue conforming to the
-// "process.executable.path" semantic conventions. It represents the full path
-// to the process executable. On Linux based systems, can be set to the target
-// of `proc/[pid]/exe`. On Windows, can be set to the result of
-// `GetProcessImageFileNameW`.
-func ProcessExecutablePath(val string) attribute.KeyValue {
- return ProcessExecutablePathKey.String(val)
-}
-
-// ProcessCommand returns an attribute KeyValue conforming to the
-// "process.command" semantic conventions. It represents the command used to
-// launch the process (i.e. the command name). On Linux based systems, can be
-// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
-// the first parameter extracted from `GetCommandLineW`.
-func ProcessCommand(val string) attribute.KeyValue {
- return ProcessCommandKey.String(val)
-}
-
-// ProcessCommandLine returns an attribute KeyValue conforming to the
-// "process.command_line" semantic conventions. It represents the full command
-// used to launch the process as a single string representing the full command.
-// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
-// if you have to assemble it just for monitoring; use `process.command_args`
-// instead.
-func ProcessCommandLine(val string) attribute.KeyValue {
- return ProcessCommandLineKey.String(val)
-}
-
-// ProcessCommandArgs returns an attribute KeyValue conforming to the
-// "process.command_args" semantic conventions. It represents all the
-// command arguments (including the command/executable itself) as received by
-// the process. On Linux-based systems (and some other Unixoid systems
-// supporting procfs), can be set according to the list of null-delimited
-// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
-// this would be the full argv vector passed to `main`.
-func ProcessCommandArgs(val ...string) attribute.KeyValue {
- return ProcessCommandArgsKey.StringSlice(val)
-}
-
-// ProcessOwner returns an attribute KeyValue conforming to the
-// "process.owner" semantic conventions. It represents the username of the user
-// that owns the process.
-func ProcessOwner(val string) attribute.KeyValue {
- return ProcessOwnerKey.String(val)
-}
-
-// The single (language) runtime instance which is monitored.
-const (
- // ProcessRuntimeNameKey is the attribute Key conforming to the
- // "process.runtime.name" semantic conventions. It represents the name of
- // the runtime of this process. For compiled native binaries, this SHOULD
- // be the name of the compiler.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'OpenJDK Runtime Environment'
- ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
-
- // ProcessRuntimeVersionKey is the attribute Key conforming to the
- // "process.runtime.version" semantic conventions. It represents the
- // version of the runtime of this process, as returned by the runtime
- // without modification.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '14.0.2'
- ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
-
- // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
- // "process.runtime.description" semantic conventions. It represents an
- // additional description about the runtime of the process, for example a
- // specific vendor customization of the runtime environment.
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") -) - -// ProcessRuntimeName returns an attribute KeyValue conforming to the -// "process.runtime.name" semantic conventions. It represents the name of the -// runtime of this process. For compiled native binaries, this SHOULD be the -// name of the compiler. -func ProcessRuntimeName(val string) attribute.KeyValue { - return ProcessRuntimeNameKey.String(val) -} - -// ProcessRuntimeVersion returns an attribute KeyValue conforming to the -// "process.runtime.version" semantic conventions. It represents the version of -// the runtime of this process, as returned by the runtime without -// modification. -func ProcessRuntimeVersion(val string) attribute.KeyValue { - return ProcessRuntimeVersionKey.String(val) -} - -// ProcessRuntimeDescription returns an attribute KeyValue conforming to the -// "process.runtime.description" semantic conventions. It represents an -// additional description about the runtime of the process, for example a -// specific vendor customization of the runtime environment. -func ProcessRuntimeDescription(val string) attribute.KeyValue { - return ProcessRuntimeDescriptionKey.String(val) -} - -// A service instance. -const ( - // ServiceNameKey is the attribute Key conforming to the "service.name" - // semantic conventions. It represents the logical name of the service. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'shoppingcart' - // Note: MUST be the same for all instances of horizontally scaled - // services. If the value was not specified, SDKs MUST fallback to - // `unknown_service:` concatenated with - // [`process.executable.name`](process.md#process), e.g. - // `unknown_service:bash`. If `process.executable.name` is not available, - // the value MUST be set to `unknown_service`. - ServiceNameKey = attribute.Key("service.name") -) - -// ServiceName returns an attribute KeyValue conforming to the -// "service.name" semantic conventions. It represents the logical name of the -// service. -func ServiceName(val string) attribute.KeyValue { - return ServiceNameKey.String(val) -} - -// A service instance. -const ( - // ServiceNamespaceKey is the attribute Key conforming to the - // "service.namespace" semantic conventions. It represents a namespace for - // `service.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group - // of services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` - // is expected to be unique for all services that have no explicit - // namespace defined (so the empty/unspecified namespace is simply one more - // valid namespace). Zero-length namespace string is assumed equal to - // unspecified namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") - - // ServiceInstanceIDKey is the attribute Key conforming to the - // "service.instance.id" semantic conventions. It represents the string ID - // of the service instance. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'my-k8s-pod-deployment-1', - // '627cc493-f310-47de-96bd-71410b7dec09' - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be - // globally unique). The ID helps to distinguish instances of the same - // service that exist at the same time (e.g. instances of a horizontally - // scaled service). It is preferable for the ID to be persistent and stay - // the same for the lifetime of the service instance, however it is - // acceptable that the ID is ephemeral and changes during important - // lifetime events for the service (e.g. service restarts). If the service - // has no inherent unique ID that can be used as the value of this - // attribute it is recommended to generate a random Version 1 or Version 4 - // RFC 4122 UUID (services aiming for reproducible UUIDs may also use - // Version 5, see RFC 4122 for more recommendations). - ServiceInstanceIDKey = attribute.Key("service.instance.id") - - // ServiceVersionKey is the attribute Key conforming to the - // "service.version" semantic conventions. It represents the version string - // of the service API or implementation. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2.0.0' - ServiceVersionKey = attribute.Key("service.version") -) - -// ServiceNamespace returns an attribute KeyValue conforming to the -// "service.namespace" semantic conventions. It represents a namespace for -// `service.name`. -func ServiceNamespace(val string) attribute.KeyValue { - return ServiceNamespaceKey.String(val) -} - -// ServiceInstanceID returns an attribute KeyValue conforming to the -// "service.instance.id" semantic conventions. It represents the string ID of -// the service instance. -func ServiceInstanceID(val string) attribute.KeyValue { - return ServiceInstanceIDKey.String(val) -} - -// ServiceVersion returns an attribute KeyValue conforming to the -// "service.version" semantic conventions. It represents the version string of -// the service API or implementation. -func ServiceVersion(val string) attribute.KeyValue { - return ServiceVersionKey.String(val) -} - -// The telemetry SDK used to capture data recorded by the instrumentation -// libraries. -const ( - // TelemetrySDKNameKey is the attribute Key conforming to the - // "telemetry.sdk.name" semantic conventions. It represents the name of the - // telemetry SDK as defined above. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'opentelemetry' - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - - // TelemetrySDKLanguageKey is the attribute Key conforming to the - // "telemetry.sdk.language" semantic conventions. It represents the - // language of the telemetry SDK. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - - // TelemetrySDKVersionKey is the attribute Key conforming to the - // "telemetry.sdk.version" semantic conventions. It represents the version - // string of the telemetry SDK. 
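// NOTE: a minimal usage sketch, not taken from the file above; it assumes the
// go.opentelemetry.io/otel/sdk/resource package and uses made-up example
// values. The service.* helpers return attribute.KeyValue pairs that are
// typically used to seed an SDK resource:
//
//	res := resource.NewWithAttributes(semconv.SchemaURL,
//		semconv.ServiceName("shoppingcart"),
//		semconv.ServiceNamespace("Shop"),
//		semconv.ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
//		semconv.ServiceVersion("2.0.0"),
//	)
//
// so that every span exported through that SDK carries the same service
// identity.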
- // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: '1.2.3' - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") -) - -var ( - // cpp - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // webjs - TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") - // swift - TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") -) - -// TelemetrySDKName returns an attribute KeyValue conforming to the -// "telemetry.sdk.name" semantic conventions. It represents the name of the -// telemetry SDK as defined above. -func TelemetrySDKName(val string) attribute.KeyValue { - return TelemetrySDKNameKey.String(val) -} - -// TelemetrySDKVersion returns an attribute KeyValue conforming to the -// "telemetry.sdk.version" semantic conventions. It represents the version -// string of the telemetry SDK. -func TelemetrySDKVersion(val string) attribute.KeyValue { - return TelemetrySDKVersionKey.String(val) -} - -// The telemetry SDK used to capture data recorded by the instrumentation -// libraries. -const ( - // TelemetryAutoVersionKey is the attribute Key conforming to the - // "telemetry.auto.version" semantic conventions. It represents the version - // string of the auto instrumentation agent, if used. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.2.3' - TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") -) - -// TelemetryAutoVersion returns an attribute KeyValue conforming to the -// "telemetry.auto.version" semantic conventions. It represents the version -// string of the auto instrumentation agent, if used. -func TelemetryAutoVersion(val string) attribute.KeyValue { - return TelemetryAutoVersionKey.String(val) -} - -// Resource describing the packaged software running the application code. Web -// engines are typically executed using process.runtime. -const ( - // WebEngineNameKey is the attribute Key conforming to the "webengine.name" - // semantic conventions. It represents the name of the web engine. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'WildFly' - WebEngineNameKey = attribute.Key("webengine.name") - - // WebEngineVersionKey is the attribute Key conforming to the - // "webengine.version" semantic conventions. It represents the version of - // the web engine. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '21.0.0' - WebEngineVersionKey = attribute.Key("webengine.version") - - // WebEngineDescriptionKey is the attribute Key conforming to the - // "webengine.description" semantic conventions. It represents the - // additional description of the web engine (e.g. detailed version and - // edition information). 
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
- // 2.2.2.Final'
- WebEngineDescriptionKey = attribute.Key("webengine.description")
-)
-
-// WebEngineName returns an attribute KeyValue conforming to the
-// "webengine.name" semantic conventions. It represents the name of the web
-// engine.
-func WebEngineName(val string) attribute.KeyValue {
- return WebEngineNameKey.String(val)
-}
-
-// WebEngineVersion returns an attribute KeyValue conforming to the
-// "webengine.version" semantic conventions. It represents the version of the
-// web engine.
-func WebEngineVersion(val string) attribute.KeyValue {
- return WebEngineVersionKey.String(val)
-}
-
-// WebEngineDescription returns an attribute KeyValue conforming to the
-// "webengine.description" semantic conventions. It represents the additional
-// description of the web engine (e.g. detailed version and edition
-// information).
-func WebEngineDescription(val string) attribute.KeyValue {
- return WebEngineDescriptionKey.String(val)
-}
-
-// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
-// concepts.
-const (
- // OTelScopeNameKey is the attribute Key conforming to the
- // "otel.scope.name" semantic conventions. It represents the name of the
- // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'io.opentelemetry.contrib.mongodb'
- OTelScopeNameKey = attribute.Key("otel.scope.name")
-
- // OTelScopeVersionKey is the attribute Key conforming to the
- // "otel.scope.version" semantic conventions. It represents the version of
- // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.0.0'
- OTelScopeVersionKey = attribute.Key("otel.scope.version")
-)
-
-// OTelScopeName returns an attribute KeyValue conforming to the
-// "otel.scope.name" semantic conventions. It represents the name of the
-// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
-func OTelScopeName(val string) attribute.KeyValue {
- return OTelScopeNameKey.String(val)
-}
-
-// OTelScopeVersion returns an attribute KeyValue conforming to the
-// "otel.scope.version" semantic conventions. It represents the version of the
-// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
-func OTelScopeVersion(val string) attribute.KeyValue {
- return OTelScopeVersionKey.String(val)
-}
-
-// Span attributes used by non-OTLP exporters to represent OpenTelemetry
-// Scope's concepts.
-const (
- // OTelLibraryNameKey is the attribute Key conforming to the
- // "otel.library.name" semantic conventions. It is deprecated; use the
- // `otel.scope.name` attribute instead.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 'io.opentelemetry.contrib.mongodb'
- OTelLibraryNameKey = attribute.Key("otel.library.name")
-
- // OTelLibraryVersionKey is the attribute Key conforming to the
- // "otel.library.version" semantic conventions. It is deprecated; use the
- // `otel.scope.version` attribute instead.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: '1.0.0'
- OTelLibraryVersionKey = attribute.Key("otel.library.version")
-)
-
-// OTelLibraryName returns an attribute KeyValue conforming to the
-// "otel.library.name" semantic conventions. It is deprecated; use the
-// `otel.scope.name` attribute instead.
-func OTelLibraryName(val string) attribute.KeyValue {
- return OTelLibraryNameKey.String(val)
-}
-
-// OTelLibraryVersion returns an attribute KeyValue conforming to the
-// "otel.library.version" semantic conventions. It is deprecated; use the
-// `otel.scope.version` attribute instead.
-func OTelLibraryVersion(val string) attribute.KeyValue {
- return OTelLibraryVersionKey.String(val)
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
deleted file mode 100644
index 95d0210e38f27..0000000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-// SchemaURL is the schema URL that matches the version of the semantic conventions
-// that this package defines. Semconv packages starting from v1.4.0 must declare
-// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
-const SchemaURL = "https://opentelemetry.io/schemas/1.20.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
deleted file mode 100644
index 90b1b0452cc00..0000000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
+++ /dev/null
@@ -1,2599 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// The shared attributes used to report a single exception associated with a
-// span or log.
-const (
- // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
- // semantic conventions. It represents the type of the exception (its
- // fully-qualified class name, if applicable). The dynamic type of the
- // exception should be preferred over the static type in languages that
- // support it.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'java.net.ConnectException', 'OSError'
- ExceptionTypeKey = attribute.Key("exception.type")
-
- // ExceptionMessageKey is the attribute Key conforming to the
- // "exception.message" semantic conventions. It represents the exception
- // message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Division by zero', "Can't convert 'int' object to str
- // implicitly"
- ExceptionMessageKey = attribute.Key("exception.message")
-
- // ExceptionStacktraceKey is the attribute Key conforming to the
- // "exception.stacktrace" semantic conventions. It represents a stacktrace
- // as a string in the natural representation for the language runtime. The
- // representation is to be determined and documented by each language SIG.
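// NOTE: a minimal usage sketch, not taken from the file above; it assumes a
// span obtained from go.opentelemetry.io/otel/trace and made-up example
// values. The exception.* helpers are usually attached to an "exception"
// span event:
//
//	span.AddEvent("exception", trace.WithAttributes(
//		semconv.ExceptionType("java.net.ConnectException"),
//		semconv.ExceptionMessage("Division by zero"),
//	))
//
// which mirrors what span.RecordError produces in the SDK.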
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test - // exception\\n at ' - // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - ExceptionStacktraceKey = attribute.Key("exception.stacktrace") -) - -// ExceptionType returns an attribute KeyValue conforming to the -// "exception.type" semantic conventions. It represents the type of the -// exception (its fully-qualified class name, if applicable). The dynamic type -// of the exception should be preferred over the static type in languages that -// support it. -func ExceptionType(val string) attribute.KeyValue { - return ExceptionTypeKey.String(val) -} - -// ExceptionMessage returns an attribute KeyValue conforming to the -// "exception.message" semantic conventions. It represents the exception -// message. -func ExceptionMessage(val string) attribute.KeyValue { - return ExceptionMessageKey.String(val) -} - -// ExceptionStacktrace returns an attribute KeyValue conforming to the -// "exception.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func ExceptionStacktrace(val string) attribute.KeyValue { - return ExceptionStacktraceKey.String(val) -} - -// The attributes described in this section are rather generic. They may be -// used in any Log Record they apply to. -const ( - // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" - // semantic conventions. It represents a unique identifier for the Log - // Record. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' - // Note: If an id is provided, other log records with the same id will be - // considered duplicates and can be removed safely. This means, that two - // distinguishable log records MUST have different values. - // The id MAY be an [Universally Unique Lexicographically Sortable - // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers - // (e.g. UUID) may be used as needed. - LogRecordUIDKey = attribute.Key("log.record.uid") -) - -// LogRecordUID returns an attribute KeyValue conforming to the -// "log.record.uid" semantic conventions. It represents a unique identifier for -// the Log Record. -func LogRecordUID(val string) attribute.KeyValue { - return LogRecordUIDKey.String(val) -} - -// Span attributes used by AWS Lambda (in addition to general `faas` -// attributes). -const ( - // AWSLambdaInvokedARNKey is the attribute Key conforming to the - // "aws.lambda.invoked_arn" semantic conventions. It represents the full - // invoked ARN as provided on the `Context` passed to the function - // (`Lambda-Runtime-Invoked-Function-ARN` header on the - // `/runtime/invocation/next` applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' - // Note: This may be different from `cloud.resource_id` if an alias is - // involved. - AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") -) - -// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the -// "aws.lambda.invoked_arn" semantic conventions. 
It represents the full -// invoked ARN as provided on the `Context` passed to the function -// (`Lambda-Runtime-Invoked-Function-ARN` header on the -// `/runtime/invocation/next` applicable). -func AWSLambdaInvokedARN(val string) attribute.KeyValue { - return AWSLambdaInvokedARNKey.String(val) -} - -// Attributes for CloudEvents. CloudEvents is a specification on how to define -// event data in a standard way. These attributes can be attached to spans when -// performing operations with CloudEvents, regardless of the protocol being -// used. -const ( - // CloudeventsEventIDKey is the attribute Key conforming to the - // "cloudevents.event_id" semantic conventions. It represents the - // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) - // uniquely identifies the event. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' - CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") - - // CloudeventsEventSourceKey is the attribute Key conforming to the - // "cloudevents.event_source" semantic conventions. It represents the - // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) - // identifies the context in which an event happened. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'https://github.com/cloudevents', - // '/cloudevents/spec/pull/123', 'my-service' - CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") - - // CloudeventsEventSpecVersionKey is the attribute Key conforming to the - // "cloudevents.event_spec_version" semantic conventions. It represents the - // [version of the CloudEvents - // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) - // which the event uses. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.0' - CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") - - // CloudeventsEventTypeKey is the attribute Key conforming to the - // "cloudevents.event_type" semantic conventions. It represents the - // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) - // contains a value describing the type of event related to the originating - // occurrence. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'com.github.pull_request.opened', - // 'com.example.object.deleted.v2' - CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") - - // CloudeventsEventSubjectKey is the attribute Key conforming to the - // "cloudevents.event_subject" semantic conventions. It represents the - // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) - // of the event in the context of the event producer (identified by - // source). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'mynewfile.jpg' - CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") -) - -// CloudeventsEventID returns an attribute KeyValue conforming to the -// "cloudevents.event_id" semantic conventions. It represents the -// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) -// uniquely identifies the event. 
-func CloudeventsEventID(val string) attribute.KeyValue { - return CloudeventsEventIDKey.String(val) -} - -// CloudeventsEventSource returns an attribute KeyValue conforming to the -// "cloudevents.event_source" semantic conventions. It represents the -// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) -// identifies the context in which an event happened. -func CloudeventsEventSource(val string) attribute.KeyValue { - return CloudeventsEventSourceKey.String(val) -} - -// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to -// the "cloudevents.event_spec_version" semantic conventions. It represents the -// [version of the CloudEvents -// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) -// which the event uses. -func CloudeventsEventSpecVersion(val string) attribute.KeyValue { - return CloudeventsEventSpecVersionKey.String(val) -} - -// CloudeventsEventType returns an attribute KeyValue conforming to the -// "cloudevents.event_type" semantic conventions. It represents the -// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) -// contains a value describing the type of event related to the originating -// occurrence. -func CloudeventsEventType(val string) attribute.KeyValue { - return CloudeventsEventTypeKey.String(val) -} - -// CloudeventsEventSubject returns an attribute KeyValue conforming to the -// "cloudevents.event_subject" semantic conventions. It represents the -// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) -// of the event in the context of the event producer (identified by source). -func CloudeventsEventSubject(val string) attribute.KeyValue { - return CloudeventsEventSubjectKey.String(val) -} - -// Semantic conventions for the OpenTracing Shim -const ( - // OpentracingRefTypeKey is the attribute Key conforming to the - // "opentracing.ref_type" semantic conventions. It represents the - // parent-child Reference type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Note: The causal relationship between a child Span and a parent Span. - OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") -) - -var ( - // The parent Span depends on the child Span in some capacity - OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") - // The parent Span does not depend in any way on the result of the child Span - OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") -) - -// The attributes used to perform database client calls. -const ( - // DBSystemKey is the attribute Key conforming to the "db.system" semantic - // conventions. It represents an identifier for the database management - // system (DBMS) product being used. See below for a list of well-known - // identifiers. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - DBSystemKey = attribute.Key("db.system") - - // DBConnectionStringKey is the attribute Key conforming to the - // "db.connection_string" semantic conventions. It represents the - // connection string used to connect to the database. It is recommended to - // remove embedded credentials. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' - DBConnectionStringKey = attribute.Key("db.connection_string") - - // DBUserKey is the attribute Key conforming to the "db.user" semantic - // conventions. 
- // It represents the username for accessing the database.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'readonly_user', 'reporting_user'
- DBUserKey = attribute.Key("db.user")
-
- // DBJDBCDriverClassnameKey is the attribute Key conforming to the
- // "db.jdbc.driver_classname" semantic conventions. It represents the
- // fully-qualified class name of the [Java Database Connectivity
- // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
- // driver used to connect.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'org.postgresql.Driver',
- // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
- DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
-
- // DBNameKey is the attribute Key conforming to the "db.name" semantic
- // conventions. It represents the name
- // of the database being accessed. For commands that switch the database,
- // this should be set to the target database (even if the command fails).
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If applicable.)
- // Stability: stable
- // Examples: 'customers', 'main'
- // Note: In some SQL databases, the database name to be used is called
- // "schema name". In case there are multiple layers that could be
- // considered for database name (e.g. Oracle instance name and schema
- // name), the database name to be used is the more specific layer (e.g.
- // Oracle schema name).
- DBNameKey = attribute.Key("db.name")
-
- // DBStatementKey is the attribute Key conforming to the "db.statement"
- // semantic conventions. It represents the database statement being
- // executed.
- //
- // Type: string
- // RequirementLevel: Recommended (Should be collected by default only if
- // there is sanitization that excludes sensitive information.)
- // Stability: stable
- // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
- DBStatementKey = attribute.Key("db.statement")
-
- // DBOperationKey is the attribute Key conforming to the "db.operation"
- // semantic conventions. It represents the name of the operation being
- // executed, e.g. the [MongoDB command
- // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
- // such as `findAndModify`, or the SQL keyword.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If `db.statement` is not
- // applicable.)
- // Stability: stable
- // Examples: 'findAndModify', 'HMSET', 'SELECT'
- // Note: When setting this to an SQL keyword, it is not recommended to
- // attempt any client-side parsing of `db.statement` just to get this
- // property, but it should be set if the operation name is provided by the
- // library being instrumented. If the SQL statement has an ambiguous
- // operation, or performs more than one operation, this value may be
- // omitted.
- DBOperationKey = attribute.Key("db.operation")
-)
-
-var (
- // Some other SQL database. Fallback only.
See notes - DBSystemOtherSQL = DBSystemKey.String("other_sql") - // Microsoft SQL Server - DBSystemMSSQL = DBSystemKey.String("mssql") - // Microsoft SQL Server Compact - DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") - // MySQL - DBSystemMySQL = DBSystemKey.String("mysql") - // Oracle Database - DBSystemOracle = DBSystemKey.String("oracle") - // IBM DB2 - DBSystemDB2 = DBSystemKey.String("db2") - // PostgreSQL - DBSystemPostgreSQL = DBSystemKey.String("postgresql") - // Amazon Redshift - DBSystemRedshift = DBSystemKey.String("redshift") - // Apache Hive - DBSystemHive = DBSystemKey.String("hive") - // Cloudscape - DBSystemCloudscape = DBSystemKey.String("cloudscape") - // HyperSQL DataBase - DBSystemHSQLDB = DBSystemKey.String("hsqldb") - // Progress Database - DBSystemProgress = DBSystemKey.String("progress") - // SAP MaxDB - DBSystemMaxDB = DBSystemKey.String("maxdb") - // SAP HANA - DBSystemHanaDB = DBSystemKey.String("hanadb") - // Ingres - DBSystemIngres = DBSystemKey.String("ingres") - // FirstSQL - DBSystemFirstSQL = DBSystemKey.String("firstsql") - // EnterpriseDB - DBSystemEDB = DBSystemKey.String("edb") - // InterSystems Caché - DBSystemCache = DBSystemKey.String("cache") - // Adabas (Adaptable Database System) - DBSystemAdabas = DBSystemKey.String("adabas") - // Firebird - DBSystemFirebird = DBSystemKey.String("firebird") - // Apache Derby - DBSystemDerby = DBSystemKey.String("derby") - // FileMaker - DBSystemFilemaker = DBSystemKey.String("filemaker") - // Informix - DBSystemInformix = DBSystemKey.String("informix") - // InstantDB - DBSystemInstantDB = DBSystemKey.String("instantdb") - // InterBase - DBSystemInterbase = DBSystemKey.String("interbase") - // MariaDB - DBSystemMariaDB = DBSystemKey.String("mariadb") - // Netezza - DBSystemNetezza = DBSystemKey.String("netezza") - // Pervasive PSQL - DBSystemPervasive = DBSystemKey.String("pervasive") - // PointBase - DBSystemPointbase = DBSystemKey.String("pointbase") - // SQLite - DBSystemSqlite = DBSystemKey.String("sqlite") - // Sybase - DBSystemSybase = DBSystemKey.String("sybase") - // Teradata - DBSystemTeradata = DBSystemKey.String("teradata") - // Vertica - DBSystemVertica = DBSystemKey.String("vertica") - // H2 - DBSystemH2 = DBSystemKey.String("h2") - // ColdFusion IMQ - DBSystemColdfusion = DBSystemKey.String("coldfusion") - // Apache Cassandra - DBSystemCassandra = DBSystemKey.String("cassandra") - // Apache HBase - DBSystemHBase = DBSystemKey.String("hbase") - // MongoDB - DBSystemMongoDB = DBSystemKey.String("mongodb") - // Redis - DBSystemRedis = DBSystemKey.String("redis") - // Couchbase - DBSystemCouchbase = DBSystemKey.String("couchbase") - // CouchDB - DBSystemCouchDB = DBSystemKey.String("couchdb") - // Microsoft Azure Cosmos DB - DBSystemCosmosDB = DBSystemKey.String("cosmosdb") - // Amazon DynamoDB - DBSystemDynamoDB = DBSystemKey.String("dynamodb") - // Neo4j - DBSystemNeo4j = DBSystemKey.String("neo4j") - // Apache Geode - DBSystemGeode = DBSystemKey.String("geode") - // Elasticsearch - DBSystemElasticsearch = DBSystemKey.String("elasticsearch") - // Memcached - DBSystemMemcached = DBSystemKey.String("memcached") - // CockroachDB - DBSystemCockroachdb = DBSystemKey.String("cockroachdb") - // OpenSearch - DBSystemOpensearch = DBSystemKey.String("opensearch") - // ClickHouse - DBSystemClickhouse = DBSystemKey.String("clickhouse") - // Cloud Spanner - DBSystemSpanner = DBSystemKey.String("spanner") - // Trino - DBSystemTrino = DBSystemKey.String("trino") -) - -// DBConnectionString returns an 
-// attribute KeyValue conforming to the
-// "db.connection_string" semantic conventions. It represents the connection
-// string used to connect to the database. It is recommended to remove embedded
-// credentials.
-func DBConnectionString(val string) attribute.KeyValue {
- return DBConnectionStringKey.String(val)
-}
-
-// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
-// conventions. It represents the username for accessing the database.
-func DBUser(val string) attribute.KeyValue {
- return DBUserKey.String(val)
-}
-
-// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
-// "db.jdbc.driver_classname" semantic conventions. It represents the
-// fully-qualified class name of the [Java Database Connectivity
-// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
-// used to connect.
-func DBJDBCDriverClassname(val string) attribute.KeyValue {
- return DBJDBCDriverClassnameKey.String(val)
-}
-
-// DBName returns an attribute KeyValue conforming to the "db.name" semantic
-// conventions. It represents the name of
-// the database being accessed. For commands that switch the database, this
-// should be set to the target database (even if the command fails).
-func DBName(val string) attribute.KeyValue {
- return DBNameKey.String(val)
-}
-
-// DBStatement returns an attribute KeyValue conforming to the
-// "db.statement" semantic conventions. It represents the database statement
-// being executed.
-func DBStatement(val string) attribute.KeyValue {
- return DBStatementKey.String(val)
-}
-
-// DBOperation returns an attribute KeyValue conforming to the
-// "db.operation" semantic conventions. It represents the name of the operation
-// being executed, e.g. the [MongoDB command
-// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
-// such as `findAndModify`, or the SQL keyword.
-func DBOperation(val string) attribute.KeyValue {
- return DBOperationKey.String(val)
-}
-
-// Connection-level attributes for Microsoft SQL Server
-const (
- // DBMSSQLInstanceNameKey is the attribute Key conforming to the
- // "db.mssql.instance_name" semantic conventions. It represents the
- // Microsoft SQL Server [instance
- // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
- // connecting to. This name is used to determine the port of a named
- // instance.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'MSSQLSERVER'
- // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no
- // longer required (but still recommended if non-standard).
- DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
-)
-
-// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
-// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
-// SQL Server [instance
-// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
-// connecting to. This name is used to determine the port of a named instance.
-func DBMSSQLInstanceName(val string) attribute.KeyValue {
- return DBMSSQLInstanceNameKey.String(val)
-}
-
-// Call-level attributes for Cassandra
-const (
- // DBCassandraPageSizeKey is the attribute Key conforming to the
- // "db.cassandra.page_size" semantic conventions. It represents the fetch
- // size used for paging, i.e. how many rows will be returned at once.
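// NOTE: a minimal usage sketch, not taken from the file above; it assumes a
// span obtained from go.opentelemetry.io/otel/trace and made-up example
// values. The db.* enum members and helper functions compose freely on a
// span:
//
//	span.SetAttributes(
//		semconv.DBSystemPostgreSQL,
//		semconv.DBName("customers"),
//		semconv.DBStatement("SELECT * FROM wuser_table"),
//		semconv.DBOperation("SELECT"),
//	)
//
// since each of them is just an attribute.KeyValue.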
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 5000
- DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
-
- // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
- // "db.cassandra.consistency_level" semantic conventions. It represents the
- // consistency level of the query. Based on consistency values from
- // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
-
- // DBCassandraTableKey is the attribute Key conforming to the
- // "db.cassandra.table" semantic conventions. It represents the name of the
- // primary table that the operation is acting upon, including the keyspace
- // name (if applicable).
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'mytable'
- // Note: This mirrors the db.sql.table attribute but references cassandra
- // rather than sql. It is not recommended to attempt any client-side
- // parsing of `db.statement` just to get this property, but it should be
- // set if it is provided by the library being instrumented. If the
- // operation is acting upon an anonymous table, or more than one table,
- // this value MUST NOT be set.
- DBCassandraTableKey = attribute.Key("db.cassandra.table")
-
- // DBCassandraIdempotenceKey is the attribute Key conforming to the
- // "db.cassandra.idempotence" semantic conventions. It represents
- // whether or not the query is idempotent.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
-
- // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
- // to the "db.cassandra.speculative_execution_count" semantic conventions.
- // It represents the number of times a query was speculatively executed.
- // Not set or `0` if the query was not executed speculatively.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 0, 2
- DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
-
- // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
- // of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
- DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
-
- // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.dc" semantic conventions. It represents the
- // data center of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'us-west-2'
- DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
-)
-
-var (
- // all
- DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
- // each_quorum
- DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
- // quorum
- DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
- // local_quorum
- DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
- // one
- DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
- // two
- DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
- // three
- DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
- // local_one
- DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
- // any
- DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
- // serial
- DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
- // local_serial
- DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
-)
-
-// DBCassandraPageSize returns an attribute KeyValue conforming to the
-// "db.cassandra.page_size" semantic conventions. It represents the fetch size
-// used for paging, i.e. how many rows will be returned at once.
-func DBCassandraPageSize(val int) attribute.KeyValue {
- return DBCassandraPageSizeKey.Int(val)
-}
-
-// DBCassandraTable returns an attribute KeyValue conforming to the
-// "db.cassandra.table" semantic conventions. It represents the name of the
-// primary table that the operation is acting upon, including the keyspace name
-// (if applicable).
-func DBCassandraTable(val string) attribute.KeyValue {
- return DBCassandraTableKey.String(val)
-}
-
-// DBCassandraIdempotence returns an attribute KeyValue conforming to the
-// "db.cassandra.idempotence" semantic conventions. It represents whether
-// or not the query is idempotent.
-func DBCassandraIdempotence(val bool) attribute.KeyValue {
- return DBCassandraIdempotenceKey.Bool(val)
-}
-
-// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
-// conforming to the "db.cassandra.speculative_execution_count" semantic
-// conventions. It represents the number of times a query was speculatively
-// executed. Not set or `0` if the query was not executed speculatively.
-func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
- return DBCassandraSpeculativeExecutionCountKey.Int(val)
-}
-
-// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
-// the coordinating node for a query.
-func DBCassandraCoordinatorID(val string) attribute.KeyValue {
- return DBCassandraCoordinatorIDKey.String(val)
-}
-
-// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
-// center of the coordinating node for a query.
-func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
- return DBCassandraCoordinatorDCKey.String(val)
-}
-
-// Call-level attributes for Redis
-const (
- // DBRedisDBIndexKey is the attribute Key conforming to the
- // "db.redis.database_index" semantic conventions.
It represents the index - // of the database being accessed as used in the [`SELECT` - // command](https://redis.io/commands/select), provided as an integer. To - // be used instead of the generic `db.name` attribute. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If other than the default - // database (`0`).) - // Stability: stable - // Examples: 0, 1, 15 - DBRedisDBIndexKey = attribute.Key("db.redis.database_index") -) - -// DBRedisDBIndex returns an attribute KeyValue conforming to the -// "db.redis.database_index" semantic conventions. It represents the index of -// the database being accessed as used in the [`SELECT` -// command](https://redis.io/commands/select), provided as an integer. To be -// used instead of the generic `db.name` attribute. -func DBRedisDBIndex(val int) attribute.KeyValue { - return DBRedisDBIndexKey.Int(val) -} - -// Call-level attributes for MongoDB -const ( - // DBMongoDBCollectionKey is the attribute Key conforming to the - // "db.mongodb.collection" semantic conventions. It represents the - // collection being accessed within the database stated in `db.name`. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'customers', 'products' - DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") -) - -// DBMongoDBCollection returns an attribute KeyValue conforming to the -// "db.mongodb.collection" semantic conventions. It represents the collection -// being accessed within the database stated in `db.name`. -func DBMongoDBCollection(val string) attribute.KeyValue { - return DBMongoDBCollectionKey.String(val) -} - -// Call-level attributes for SQL databases -const ( - // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" - // semantic conventions. It represents the name of the primary table that - // the operation is acting upon, including the database name (if - // applicable). - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'public.users', 'customers' - // Note: It is not recommended to attempt any client-side parsing of - // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting - // upon an anonymous table, or more than one table, this value MUST NOT be - // set. - DBSQLTableKey = attribute.Key("db.sql.table") -) - -// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" -// semantic conventions. It represents the name of the primary table that the -// operation is acting upon, including the database name (if applicable). -func DBSQLTable(val string) attribute.KeyValue { - return DBSQLTableKey.String(val) -} - -// Call-level attributes for Cosmos DB. -const ( - // DBCosmosDBClientIDKey is the attribute Key conforming to the - // "db.cosmosdb.client_id" semantic conventions. It represents the unique - // Cosmos client instance id. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' - DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") - - // DBCosmosDBOperationTypeKey is the attribute Key conforming to the - // "db.cosmosdb.operation_type" semantic conventions. It represents the - // cosmosDB Operation Type. 
- // - // Type: Enum - // RequirementLevel: ConditionallyRequired (when performing one of the - // operations in this list) - // Stability: stable - DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") - - // DBCosmosDBConnectionModeKey is the attribute Key conforming to the - // "db.cosmosdb.connection_mode" semantic conventions. It represents the - // cosmos client connection mode. - // - // Type: Enum - // RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as - // default)) - // Stability: stable - DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") - - // DBCosmosDBContainerKey is the attribute Key conforming to the - // "db.cosmosdb.container" semantic conventions. It represents the cosmos - // DB container name. - // - // Type: string - // RequirementLevel: ConditionallyRequired (if available) - // Stability: stable - // Examples: 'anystring' - DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") - - // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the - // "db.cosmosdb.request_content_length" semantic conventions. It represents - // the request payload size in bytes - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") - - // DBCosmosDBStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos - // DB status code. - // - // Type: int - // RequirementLevel: ConditionallyRequired (if response was received) - // Stability: stable - // Examples: 200, 201 - DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") - - // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.sub_status_code" semantic conventions. It represents the - // cosmos DB sub status code. - // - // Type: int - // RequirementLevel: ConditionallyRequired (when response was received and - // contained sub-code.) - // Stability: stable - // Examples: 1000, 1002 - DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") - - // DBCosmosDBRequestChargeKey is the attribute Key conforming to the - // "db.cosmosdb.request_charge" semantic conventions. 
It represents the rU - // consumed for that operation - // - // Type: double - // RequirementLevel: ConditionallyRequired (when available) - // Stability: stable - // Examples: 46.18, 1.0 - DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") -) - -var ( - // invalid - DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") - // create - DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") - // patch - DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") - // read - DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") - // read_feed - DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") - // delete - DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") - // replace - DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") - // execute - DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") - // query - DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") - // head - DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") - // head_feed - DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") - // upsert - DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") - // batch - DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") - // query_plan - DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") - // execute_javascript - DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") -) - -var ( - // Gateway (HTTP) connections mode - DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") - // Direct connection - DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") -) - -// DBCosmosDBClientID returns an attribute KeyValue conforming to the -// "db.cosmosdb.client_id" semantic conventions. It represents the unique -// Cosmos client instance id. -func DBCosmosDBClientID(val string) attribute.KeyValue { - return DBCosmosDBClientIDKey.String(val) -} - -// DBCosmosDBContainer returns an attribute KeyValue conforming to the -// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB -// container name. -func DBCosmosDBContainer(val string) attribute.KeyValue { - return DBCosmosDBContainerKey.String(val) -} - -// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming -// to the "db.cosmosdb.request_content_length" semantic conventions. It -// represents the request payload size in bytes -func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { - return DBCosmosDBRequestContentLengthKey.Int(val) -} - -// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB -// status code. -func DBCosmosDBStatusCode(val int) attribute.KeyValue { - return DBCosmosDBStatusCodeKey.Int(val) -} - -// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos -// DB sub status code. -func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { - return DBCosmosDBSubStatusCodeKey.Int(val) -} - -// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the -// "db.cosmosdb.request_charge" semantic conventions. 
-
-// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
-// concepts.
-const (
- // OTelStatusCodeKey is the attribute Key conforming to the
- // "otel.status_code" semantic conventions. It represents the name of the
- // code, either "OK" or "ERROR". MUST NOT be set if the status code is
- // UNSET.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- OTelStatusCodeKey = attribute.Key("otel.status_code")
-
- // OTelStatusDescriptionKey is the attribute Key conforming to the
- // "otel.status_description" semantic conventions. It represents the
- // description of the Status if it has a value, otherwise not set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'resource not found'
- OTelStatusDescriptionKey = attribute.Key("otel.status_description")
-)
-
-var (
- // The operation has been validated by an Application developer or Operator to have completed successfully
- OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
- // The operation contains an error
- OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
-)
-
-// OTelStatusDescription returns an attribute KeyValue conforming to the
-// "otel.status_description" semantic conventions. It represents the
-// description of the Status if it has a value, otherwise not set.
-func OTelStatusDescription(val string) attribute.KeyValue {
- return OTelStatusDescriptionKey.String(val)
-}
-
-// This semantic convention describes an instance of a function that runs
-// without provisioning or managing servers (also known as serverless
-// functions or Function as a Service (FaaS)) with spans.
-const (
- // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
- // semantic conventions. It represents the type of the trigger which caused
- // this function invocation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Note: For the server/consumer span on the incoming side,
- // `faas.trigger` MUST be set.
- //
- // Clients invoking FaaS instances usually cannot set `faas.trigger`,
- // since they would typically need to look in the payload to determine
- // the event type. If clients set it, it should be the same as the
- // trigger that the corresponding incoming span would have (i.e., this has
- // nothing to do with the underlying transport used to make the API
- // call to invoke the lambda, which is often HTTP).
- FaaSTriggerKey = attribute.Key("faas.trigger")
-
- // FaaSInvocationIDKey is the attribute Key conforming to the
- // "faas.invocation_id" semantic conventions. It represents the invocation
- // ID of the current function invocation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
- FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
-)
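// [Editor's note] A usage sketch, not part of the original file: an incoming
// FaaS HTTP invocation could be annotated as follows, assuming `span` is the
// server-side trace.Span.
//
//	span.SetAttributes(
//		FaaSTriggerHTTP,
//		FaaSInvocationID("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
//	)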
-
-var (
- // A response to some data source operation such as a database or filesystem read/write
- FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
- // To provide an answer to an inbound HTTP request
- FaaSTriggerHTTP = FaaSTriggerKey.String("http")
- // A function is set to be executed when messages are sent to a messaging system
- FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
- // A function is scheduled to be executed regularly
- FaaSTriggerTimer = FaaSTriggerKey.String("timer")
- // If none of the others apply
- FaaSTriggerOther = FaaSTriggerKey.String("other")
-)
-
-// FaaSInvocationID returns an attribute KeyValue conforming to the
-// "faas.invocation_id" semantic conventions. It represents the invocation ID
-// of the current function invocation.
-func FaaSInvocationID(val string) attribute.KeyValue {
- return FaaSInvocationIDKey.String(val)
-}
-
-// Semantic Convention for FaaS triggered as a response to some data source
-// operation such as a database or filesystem read/write.
-const (
- // FaaSDocumentCollectionKey is the attribute Key conforming to the
- // "faas.document.collection" semantic conventions. It represents the name
- // of the source on which the triggering operation was performed. For
- // example, in Cloud Storage or S3 corresponds to the bucket name, and in
- // Cosmos DB to the database name.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'myBucketName', 'myDBName'
- FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
-
- // FaaSDocumentOperationKey is the attribute Key conforming to the
- // "faas.document.operation" semantic conventions. It represents the
- // type of the operation that was performed on the data.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
-
- // FaaSDocumentTimeKey is the attribute Key conforming to the
- // "faas.document.time" semantic conventions. It represents a string
- // containing the time when the data was accessed in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2020-01-23T13:47:06Z'
- FaaSDocumentTimeKey = attribute.Key("faas.document.time")
-
- // FaaSDocumentNameKey is the attribute Key conforming to the
- // "faas.document.name" semantic conventions. It represents the document
- // name/table subjected to the operation. For example, in Cloud Storage or
- // S3 is the name of the file, and in Cosmos DB the table name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'myFile.txt', 'myTableName'
- FaaSDocumentNameKey = attribute.Key("faas.document.name")
-)
-
-var (
- // When a new object is created
- FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
- // When an object is modified
- FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
- // When an object is deleted
- FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
-)
-
-// FaaSDocumentCollection returns an attribute KeyValue conforming to the
-// "faas.document.collection" semantic conventions. 
It represents the name of -// the source on which the triggering operation was performed. For example, in -// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the -// database name. -func FaaSDocumentCollection(val string) attribute.KeyValue { - return FaaSDocumentCollectionKey.String(val) -} - -// FaaSDocumentTime returns an attribute KeyValue conforming to the -// "faas.document.time" semantic conventions. It represents a string containing -// the time when the data was accessed in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSDocumentTime(val string) attribute.KeyValue { - return FaaSDocumentTimeKey.String(val) -} - -// FaaSDocumentName returns an attribute KeyValue conforming to the -// "faas.document.name" semantic conventions. It represents the document -// name/table subjected to the operation. For example, in Cloud Storage or S3 -// is the name of the file, and in Cosmos DB the table name. -func FaaSDocumentName(val string) attribute.KeyValue { - return FaaSDocumentNameKey.String(val) -} - -// Semantic Convention for FaaS scheduled to be executed regularly. -const ( - // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic - // conventions. It represents a string containing the function invocation - // time in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2020-01-23T13:47:06Z' - FaaSTimeKey = attribute.Key("faas.time") - - // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic - // conventions. It represents a string containing the schedule period as - // [Cron - // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '0/5 * * * ? *' - FaaSCronKey = attribute.Key("faas.cron") -) - -// FaaSTime returns an attribute KeyValue conforming to the "faas.time" -// semantic conventions. It represents a string containing the function -// invocation time in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSTime(val string) attribute.KeyValue { - return FaaSTimeKey.String(val) -} - -// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" -// semantic conventions. It represents a string containing the schedule period -// as [Cron -// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). -func FaaSCron(val string) attribute.KeyValue { - return FaaSCronKey.String(val) -} - -// Contains additional attributes for incoming FaaS spans. -const ( - // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" - // semantic conventions. It represents a boolean that is true if the - // serverless function is executed for the first time (aka cold-start). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - FaaSColdstartKey = attribute.Key("faas.coldstart") -) - -// FaaSColdstart returns an attribute KeyValue conforming to the -// "faas.coldstart" semantic conventions. It represents a boolean that is true -// if the serverless function is executed for the first time (aka cold-start). 
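// [Editor's note] A usage sketch, not part of the original file: marking the
// first invocation of a function instance, assuming `span` is a trace.Span
// from go.opentelemetry.io/otel/trace.
//
//	span.SetAttributes(FaaSColdstart(true))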
-func FaaSColdstart(val bool) attribute.KeyValue { - return FaaSColdstartKey.Bool(val) -} - -// Contains additional attributes for outgoing FaaS spans. -const ( - // FaaSInvokedNameKey is the attribute Key conforming to the - // "faas.invoked_name" semantic conventions. It represents the name of the - // invoked function. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'my-function' - // Note: SHOULD be equal to the `faas.name` resource attribute of the - // invoked function. - FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - - // FaaSInvokedProviderKey is the attribute Key conforming to the - // "faas.invoked_provider" semantic conventions. It represents the cloud - // provider of the invoked function. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the - // invoked function. - FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - - // FaaSInvokedRegionKey is the attribute Key conforming to the - // "faas.invoked_region" semantic conventions. It represents the cloud - // region of the invoked function. - // - // Type: string - // RequirementLevel: ConditionallyRequired (For some cloud providers, like - // AWS or GCP, the region in which a function is hosted is essential to - // uniquely identify the function and also part of its endpoint. Since it's - // part of the endpoint being called, the region is always known to - // clients. In these cases, `faas.invoked_region` MUST be set accordingly. - // If the region is unknown to the client or not required for identifying - // the invoked function, setting `faas.invoked_region` is optional.) - // Stability: stable - // Examples: 'eu-central-1' - // Note: SHOULD be equal to the `cloud.region` resource attribute of the - // invoked function. - FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") -) - -var ( - // Alibaba Cloud - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") - // Tencent Cloud - FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") -) - -// FaaSInvokedName returns an attribute KeyValue conforming to the -// "faas.invoked_name" semantic conventions. It represents the name of the -// invoked function. -func FaaSInvokedName(val string) attribute.KeyValue { - return FaaSInvokedNameKey.String(val) -} - -// FaaSInvokedRegion returns an attribute KeyValue conforming to the -// "faas.invoked_region" semantic conventions. It represents the cloud region -// of the invoked function. -func FaaSInvokedRegion(val string) attribute.KeyValue { - return FaaSInvokedRegionKey.String(val) -} - -// Operations that access some remote service. -const ( - // PeerServiceKey is the attribute Key conforming to the "peer.service" - // semantic conventions. It represents the - // [`service.name`](../../resource/semantic_conventions/README.md#service) - // of the remote service. SHOULD be equal to the actual `service.name` - // resource attribute of the remote service if any. 
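// [Editor's note] A usage sketch, not part of the original file: a client
// span calling a remote service could record its logical name, assuming
// `span` is a trace.Span.
//
//	span.SetAttributes(PeerService("AuthTokenCache"))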
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'AuthTokenCache' - PeerServiceKey = attribute.Key("peer.service") -) - -// PeerService returns an attribute KeyValue conforming to the -// "peer.service" semantic conventions. It represents the -// [`service.name`](../../resource/semantic_conventions/README.md#service) of -// the remote service. SHOULD be equal to the actual `service.name` resource -// attribute of the remote service if any. -func PeerService(val string) attribute.KeyValue { - return PeerServiceKey.String(val) -} - -// These attributes may be used for any operation with an authenticated and/or -// authorized enduser. -const ( - // EnduserIDKey is the attribute Key conforming to the "enduser.id" - // semantic conventions. It represents the username or client_id extracted - // from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header - // in the inbound request from outside the system. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'username' - EnduserIDKey = attribute.Key("enduser.id") - - // EnduserRoleKey is the attribute Key conforming to the "enduser.role" - // semantic conventions. It represents the actual/assumed role the client - // is making the request under extracted from token or application security - // context. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'admin' - EnduserRoleKey = attribute.Key("enduser.role") - - // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" - // semantic conventions. It represents the scopes or granted authorities - // the client currently possesses extracted from token or application - // security context. The value would come from the scope associated with an - // [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute - // value in a [SAML 2.0 - // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'read:message, write:files' - EnduserScopeKey = attribute.Key("enduser.scope") -) - -// EnduserID returns an attribute KeyValue conforming to the "enduser.id" -// semantic conventions. It represents the username or client_id extracted from -// the access token or -// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in -// the inbound request from outside the system. -func EnduserID(val string) attribute.KeyValue { - return EnduserIDKey.String(val) -} - -// EnduserRole returns an attribute KeyValue conforming to the -// "enduser.role" semantic conventions. It represents the actual/assumed role -// the client is making the request under extracted from token or application -// security context. -func EnduserRole(val string) attribute.KeyValue { - return EnduserRoleKey.String(val) -} - -// EnduserScope returns an attribute KeyValue conforming to the -// "enduser.scope" semantic conventions. It represents the scopes or granted -// authorities the client currently possesses extracted from token or -// application security context. The value would come from the scope associated -// with an [OAuth 2.0 Access -// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute -// value in a [SAML 2.0 -// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). 
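// [Editor's note] A usage sketch, not part of the original file: recording
// the authenticated end user on a server span, assuming `span` is a
// trace.Span.
//
//	span.SetAttributes(
//		EnduserID("username"),
//		EnduserRole("admin"),
//		EnduserScope("read:message write:files"),
//	)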
-func EnduserScope(val string) attribute.KeyValue {
- return EnduserScopeKey.String(val)
-}
-
-// These attributes may be used for any operation to store information about a
-// thread that started a span.
-const (
- // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
- // conventions. It represents the current "managed" thread ID (as opposed
- // to OS thread ID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 42
- ThreadIDKey = attribute.Key("thread.id")
-
- // ThreadNameKey is the attribute Key conforming to the "thread.name"
- // semantic conventions. It represents the current thread name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'main'
- ThreadNameKey = attribute.Key("thread.name")
-)
-
-// ThreadID returns an attribute KeyValue conforming to the "thread.id"
-// semantic conventions. It represents the current "managed" thread ID (as
-// opposed to OS thread ID).
-func ThreadID(val int) attribute.KeyValue {
- return ThreadIDKey.Int(val)
-}
-
-// ThreadName returns an attribute KeyValue conforming to the "thread.name"
-// semantic conventions. It represents the current thread name.
-func ThreadName(val string) attribute.KeyValue {
- return ThreadNameKey.String(val)
-}
-
-// These attributes allow reporting this unit of code and therefore provide
-// more context about the span.
-const (
- // CodeFunctionKey is the attribute Key conforming to the "code.function"
- // semantic conventions. It represents the method or function name, or
- // equivalent (usually rightmost part of the code unit's name).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'serveRequest'
- CodeFunctionKey = attribute.Key("code.function")
-
- // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
- // semantic conventions. It represents the "namespace" within which
- // `code.function` is defined. Usually the qualified class or module name,
- // such that `code.namespace` + some separator + `code.function` form a
- // unique identifier for the code unit.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'com.example.MyHTTPService'
- CodeNamespaceKey = attribute.Key("code.namespace")
-
- // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
- // semantic conventions. It represents the source code file name that
- // identifies the code unit as uniquely as possible (preferably an absolute
- // file path).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/usr/local/MyApplication/content_root/app/index.php'
- CodeFilepathKey = attribute.Key("code.filepath")
-
- // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
- // semantic conventions. It represents the line number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 42
- CodeLineNumberKey = attribute.Key("code.lineno")
-
- // CodeColumnKey is the attribute Key conforming to the "code.column"
- // semantic conventions. It represents the column number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`. 
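// [Editor's note] A usage sketch, not part of the original file: pointing a
// span at the code that produced it, assuming `span` is a trace.Span.
//
//	span.SetAttributes(
//		CodeNamespace("com.example.MyHTTPService"),
//		CodeFunction("serveRequest"),
//		CodeLineNumber(42),
//	)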
- // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 16 - CodeColumnKey = attribute.Key("code.column") -) - -// CodeFunction returns an attribute KeyValue conforming to the -// "code.function" semantic conventions. It represents the method or function -// name, or equivalent (usually rightmost part of the code unit's name). -func CodeFunction(val string) attribute.KeyValue { - return CodeFunctionKey.String(val) -} - -// CodeNamespace returns an attribute KeyValue conforming to the -// "code.namespace" semantic conventions. It represents the "namespace" within -// which `code.function` is defined. Usually the qualified class or module -// name, such that `code.namespace` + some separator + `code.function` form a -// unique identifier for the code unit. -func CodeNamespace(val string) attribute.KeyValue { - return CodeNamespaceKey.String(val) -} - -// CodeFilepath returns an attribute KeyValue conforming to the -// "code.filepath" semantic conventions. It represents the source code file -// name that identifies the code unit as uniquely as possible (preferably an -// absolute file path). -func CodeFilepath(val string) attribute.KeyValue { - return CodeFilepathKey.String(val) -} - -// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" -// semantic conventions. It represents the line number in `code.filepath` best -// representing the operation. It SHOULD point within the code unit named in -// `code.function`. -func CodeLineNumber(val int) attribute.KeyValue { - return CodeLineNumberKey.Int(val) -} - -// CodeColumn returns an attribute KeyValue conforming to the "code.column" -// semantic conventions. It represents the column number in `code.filepath` -// best representing the operation. It SHOULD point within the code unit named -// in `code.function`. -func CodeColumn(val int) attribute.KeyValue { - return CodeColumnKey.Int(val) -} - -// Semantic Convention for HTTP Client -const ( - // HTTPURLKey is the attribute Key conforming to the "http.url" semantic - // conventions. It represents the full HTTP request URL in the form - // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is - // not transmitted over HTTP, but if it is known, it should be included - // nevertheless. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' - // Note: `http.url` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case the - // attribute's value should be `https://www.example.com/`. - HTTPURLKey = attribute.Key("http.url") - - // HTTPResendCountKey is the attribute Key conforming to the - // "http.resend_count" semantic conventions. It represents the ordinal - // number of request resending attempt (for any reason, including - // redirects). - // - // Type: int - // RequirementLevel: Recommended (if and only if request was retried.) - // Stability: stable - // Examples: 3 - // Note: The resend count SHOULD be updated each time an HTTP request gets - // resent by the client, regardless of what was the cause of the resending - // (e.g. redirection, authorization failure, 503 Server Unavailable, - // network issues, or any other). - HTTPResendCountKey = attribute.Key("http.resend_count") -) - -// HTTPURL returns an attribute KeyValue conforming to the "http.url" -// semantic conventions. 
It represents the full HTTP request URL in the form
-// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not
-// transmitted over HTTP, but if it is known, it should be included
-// nevertheless.
-func HTTPURL(val string) attribute.KeyValue {
- return HTTPURLKey.String(val)
-}
-
-// HTTPResendCount returns an attribute KeyValue conforming to the
-// "http.resend_count" semantic conventions. It represents the ordinal number
-// of request resending attempt (for any reason, including redirects).
-func HTTPResendCount(val int) attribute.KeyValue {
- return HTTPResendCountKey.Int(val)
-}
-
-// Semantic Convention for HTTP Server
-const (
- // HTTPTargetKey is the attribute Key conforming to the "http.target"
- // semantic conventions. It represents the full request target as passed in
- // an HTTP request line or equivalent.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: '/users/12314/?q=ddds'
- HTTPTargetKey = attribute.Key("http.target")
-
- // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip"
- // semantic conventions. It represents the IP address of the original
- // client behind all proxies, if known (e.g. from
- // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '83.164.160.102'
- // Note: This is not necessarily the same as `net.sock.peer.addr`, which
- // would identify the network-level peer, which may be a proxy.
- //
- // This attribute should be set when a source of information different
- // from the one used for `net.sock.peer.addr` is available, even if that
- // other source just confirms the same value as `net.sock.peer.addr`.
- // Rationale: For `net.sock.peer.addr`, one typically does not know if it
- // comes from a proxy, reverse proxy, or the actual client. Setting
- // `http.client_ip` when it's the same as `net.sock.peer.addr` means that
- // one is at least somewhat confident that the address is not that of
- // the closest proxy.
- HTTPClientIPKey = attribute.Key("http.client_ip")
-)
-
-// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
-// semantic conventions. It represents the full request target as passed in
-// an HTTP request line or equivalent.
-func HTTPTarget(val string) attribute.KeyValue {
- return HTTPTargetKey.String(val)
-}
-
-// HTTPClientIP returns an attribute KeyValue conforming to the
-// "http.client_ip" semantic conventions. It represents the IP address of the
-// original client behind all proxies, if known (e.g. from
-// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
-func HTTPClientIP(val string) attribute.KeyValue {
- return HTTPClientIPKey.String(val)
-}
-
-// The `aws` conventions apply to operations using the AWS SDK. They map
-// request or response parameters in AWS SDK API calls to attributes on a Span.
-// The conventions have been collected over time based on feedback from AWS
-// users of tracing and will continue to evolve as new interesting conventions
-// are found.
-// Some descriptions are also provided for populating general OpenTelemetry
-// semantic conventions based on these APIs.
-const (
- // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
- // semantic conventions. It represents the AWS request ID as returned in
- // the response headers `x-amz-request-id` or `x-amz-requestid`. 
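// [Editor's note] A usage sketch, not part of the original file: recording
// the AWS request ID from a response, assuming `span` is a trace.Span.
//
//	span.SetAttributes(AWSRequestID("79b9da39-b7ae-508a-a6bc-864b2829c622"))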
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' - AWSRequestIDKey = attribute.Key("aws.request_id") -) - -// AWSRequestID returns an attribute KeyValue conforming to the -// "aws.request_id" semantic conventions. It represents the AWS request ID as -// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. -func AWSRequestID(val string) attribute.KeyValue { - return AWSRequestIDKey.String(val) -} - -// Attributes that exist for multiple DynamoDB request types. -const ( - // AWSDynamoDBTableNamesKey is the attribute Key conforming to the - // "aws.dynamodb.table_names" semantic conventions. It represents the keys - // in the `RequestItems` object field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - - // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the - // JSON-serialized value of each item in the `ConsumedCapacity` response - // field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { - // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number }, "TableName": "string", - // "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - - // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to - // the "aws.dynamodb.item_collection_metrics" semantic conventions. It - // represents the JSON-serialized value of the `ItemCollectionMetrics` - // response field. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": - // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { - // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], - // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, - // "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - - // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to - // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It - // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` - // request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: stable - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - - // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming - // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. - // It represents the value of the - // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
- // - // Type: double - // RequirementLevel: Optional - // Stability: stable - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - - // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the - // "aws.dynamodb.consistent_read" semantic conventions. It represents the - // value of the `ConsistentRead` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - - // AWSDynamoDBProjectionKey is the attribute Key conforming to the - // "aws.dynamodb.projection" semantic conventions. It represents the value - // of the `ProjectionExpression` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, - // RelatedItems, ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - - // AWSDynamoDBLimitKey is the attribute Key conforming to the - // "aws.dynamodb.limit" semantic conventions. It represents the value of - // the `Limit` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - - // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the - // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the - // value of the `AttributesToGet` request parameter. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - - // AWSDynamoDBIndexNameKey is the attribute Key conforming to the - // "aws.dynamodb.index_name" semantic conventions. It represents the value - // of the `IndexName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - - // AWSDynamoDBSelectKey is the attribute Key conforming to the - // "aws.dynamodb.select" semantic conventions. It represents the value of - // the `Select` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") -) - -// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_names" semantic conventions. It represents the keys in -// the `RequestItems` object field. -func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { - return AWSDynamoDBTableNamesKey.StringSlice(val) -} - -// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to -// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the -// JSON-serialized value of each item in the `ConsumedCapacity` response field. -func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { - return AWSDynamoDBConsumedCapacityKey.StringSlice(val) -} - -// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming -// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It -// represents the JSON-serialized value of the `ItemCollectionMetrics` response -// field. 
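// [Editor's note] A usage sketch, not part of the original file: a DynamoDB
// GetItem span could carry the common request attributes above, assuming
// `span` is a trace.Span.
//
//	span.SetAttributes(
//		AWSDynamoDBTableNames("Users"),
//		AWSDynamoDBConsistentRead(true),
//		AWSDynamoDBProjection("Title, Price, Color"),
//	)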
-func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { - return AWSDynamoDBItemCollectionMetricsKey.String(val) -} - -// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.ReadCapacityUnits` request parameter. -func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) -} - -// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.WriteCapacityUnits` request parameter. -func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) -} - -// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the -// "aws.dynamodb.consistent_read" semantic conventions. It represents the value -// of the `ConsistentRead` request parameter. -func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { - return AWSDynamoDBConsistentReadKey.Bool(val) -} - -// AWSDynamoDBProjection returns an attribute KeyValue conforming to the -// "aws.dynamodb.projection" semantic conventions. It represents the value of -// the `ProjectionExpression` request parameter. -func AWSDynamoDBProjection(val string) attribute.KeyValue { - return AWSDynamoDBProjectionKey.String(val) -} - -// AWSDynamoDBLimit returns an attribute KeyValue conforming to the -// "aws.dynamodb.limit" semantic conventions. It represents the value of the -// `Limit` request parameter. -func AWSDynamoDBLimit(val int) attribute.KeyValue { - return AWSDynamoDBLimitKey.Int(val) -} - -// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to -// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the -// value of the `AttributesToGet` request parameter. -func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributesToGetKey.StringSlice(val) -} - -// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the -// "aws.dynamodb.index_name" semantic conventions. It represents the value of -// the `IndexName` request parameter. -func AWSDynamoDBIndexName(val string) attribute.KeyValue { - return AWSDynamoDBIndexNameKey.String(val) -} - -// AWSDynamoDBSelect returns an attribute KeyValue conforming to the -// "aws.dynamodb.select" semantic conventions. It represents the value of the -// `Select` request parameter. -func AWSDynamoDBSelect(val string) attribute.KeyValue { - return AWSDynamoDBSelectKey.String(val) -} - -// DynamoDB.CreateTable -const ( - // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
It
- // represents the JSON-serialized value of each item of the
- // `GlobalSecondaryIndexes` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
- // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
- // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
- // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
-
- // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
- // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
- // represents the JSON-serialized value of each item of the
- // `LocalSecondaryIndexes` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "IndexARN": "string", "IndexName": "string",
- // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
- AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
-)
-
-// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
-// conventions. It represents the JSON-serialized value of each item of the
-// `GlobalSecondaryIndexes` request field.
-func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
-}
-
-// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
-// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
-// represents the JSON-serialized value of each item of the
-// `LocalSecondaryIndexes` request field.
-func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
-}
-
-// DynamoDB.ListTables
-const (
- // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
- // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
- // the value of the `ExclusiveStartTableName` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Users', 'CatsTable'
- AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
-
- // AWSDynamoDBTableCountKey is the attribute Key conforming to the
- // "aws.dynamodb.table_count" semantic conventions. It represents the
- // number of items in the `TableNames` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 20
- AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
-)
-
-// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
-// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
-// represents the value of the `ExclusiveStartTableName` request parameter.
-func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
- return AWSDynamoDBExclusiveStartTableKey.String(val)
-}
-
-// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_count" semantic conventions. It represents the
-// number of items in the `TableNames` response parameter. 
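// [Editor's note] A usage sketch, not part of the original file: a ListTables
// span could record both the request and response values, assuming `span` is
// a trace.Span.
//
//	span.SetAttributes(
//		AWSDynamoDBExclusiveStartTable("Users"),
//		AWSDynamoDBTableCount(20),
//	)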
-func AWSDynamoDBTableCount(val int) attribute.KeyValue { - return AWSDynamoDBTableCountKey.Int(val) -} - -// DynamoDB.Query -const ( - // AWSDynamoDBScanForwardKey is the attribute Key conforming to the - // "aws.dynamodb.scan_forward" semantic conventions. It represents the - // value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") -) - -// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the -// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of -// the `ScanIndexForward` request parameter. -func AWSDynamoDBScanForward(val bool) attribute.KeyValue { - return AWSDynamoDBScanForwardKey.Bool(val) -} - -// DynamoDB.Scan -const ( - // AWSDynamoDBSegmentKey is the attribute Key conforming to the - // "aws.dynamodb.segment" semantic conventions. It represents the value of - // the `Segment` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - - // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the - // "aws.dynamodb.total_segments" semantic conventions. It represents the - // value of the `TotalSegments` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") - - // AWSDynamoDBCountKey is the attribute Key conforming to the - // "aws.dynamodb.count" semantic conventions. It represents the value of - // the `Count` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - - // AWSDynamoDBScannedCountKey is the attribute Key conforming to the - // "aws.dynamodb.scanned_count" semantic conventions. It represents the - // value of the `ScannedCount` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") -) - -// AWSDynamoDBSegment returns an attribute KeyValue conforming to the -// "aws.dynamodb.segment" semantic conventions. It represents the value of the -// `Segment` request parameter. -func AWSDynamoDBSegment(val int) attribute.KeyValue { - return AWSDynamoDBSegmentKey.Int(val) -} - -// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the -// "aws.dynamodb.total_segments" semantic conventions. It represents the value -// of the `TotalSegments` request parameter. -func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { - return AWSDynamoDBTotalSegmentsKey.Int(val) -} - -// AWSDynamoDBCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.count" semantic conventions. It represents the value of the -// `Count` response parameter. -func AWSDynamoDBCount(val int) attribute.KeyValue { - return AWSDynamoDBCountKey.Int(val) -} - -// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.scanned_count" semantic conventions. It represents the value -// of the `ScannedCount` response parameter. 
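// [Editor's note] A usage sketch, not part of the original file: a parallel
// Scan span could combine the request and response parameters above, assuming
// `span` is a trace.Span.
//
//	span.SetAttributes(
//		AWSDynamoDBSegment(10),
//		AWSDynamoDBTotalSegments(100),
//		AWSDynamoDBCount(10),
//		AWSDynamoDBScannedCount(50),
//	)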
-func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
- return AWSDynamoDBScannedCountKey.Int(val)
-}
-
-// DynamoDB.UpdateTable
-const (
- // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
- // the "aws.dynamodb.attribute_definitions" semantic conventions. It
- // represents the JSON-serialized value of each item in the
- // `AttributeDefinitions` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
- AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
-
- // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
- // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
- // conventions. It represents the JSON-serialized value of each item in
- // the `GlobalSecondaryIndexUpdates` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
- // "ProvisionedThroughput": { "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
-)
-
-// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
-// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
-// represents the JSON-serialized value of each item in the
-// `AttributeDefinitions` request field.
-func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
-}
-
-// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
-// conventions. It represents the JSON-serialized value of each item in
-// the `GlobalSecondaryIndexUpdates` request field.
-func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
-}
-
-// Attributes that exist for S3 request types.
-const (
- // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
- // semantic conventions. It represents the S3 bucket name the request
- // refers to. Corresponds to the `--bucket` parameter of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // operations.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'some-bucket-name'
- // Note: The `bucket` attribute is applicable to all S3 operations that
- // reference a bucket, i.e. that require the bucket name as a mandatory
- // parameter.
- // This applies to almost all S3 operations except `list-buckets`.
- AWSS3BucketKey = attribute.Key("aws.s3.bucket")
-
- // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
- // conventions. It represents the S3 object key the request refers to.
- // Corresponds to the `--key` parameter of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // operations.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'someFile.yml'
- // Note: The `key` attribute is applicable to all object-related S3
- // operations, i.e. 
that require the object key as a mandatory parameter. - // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // - - // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) - // - - // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) - // - - // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) - // - - // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) - // - - // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3KeyKey = attribute.Key("aws.s3.key") - - // AWSS3CopySourceKey is the attribute Key conforming to the - // "aws.s3.copy_source" semantic conventions. It represents the source - // object (in the form `bucket`/`key`) for the copy operation. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'someFile.yml' - // Note: The `copy_source` attribute applies to S3 copy operations and - // corresponds to the `--copy-source` parameter - // of the [copy-object operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). - // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") - - // AWSS3UploadIDKey is the attribute Key conforming to the - // "aws.s3.upload_id" semantic conventions. It represents the upload ID - // that identifies the multipart upload. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' - // Note: The `upload_id` attribute applies to S3 multipart-upload - // operations and corresponds to the `--upload-id` parameter - // of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // multipart operations. 
- // This applies in particular to the following operations:
- //
- // -
- // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
- // -
- // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
- // -
- // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
- // -
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
-
- // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
- // semantic conventions. It represents the delete request container that
- // specifies the objects to be deleted.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples:
- // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
- // Note: The `delete` attribute is only applicable to the
- // [delete-objects](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html)
- // operation.
- // The `delete` attribute corresponds to the `--delete` parameter of the
- // [delete-objects operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
- AWSS3DeleteKey = attribute.Key("aws.s3.delete")
-
- // AWSS3PartNumberKey is the attribute Key conforming to the
- // "aws.s3.part_number" semantic conventions. It represents the part number
- // of the part being uploaded in a multipart-upload operation. This is a
- // positive integer between 1 and 10,000.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 3456
- // Note: The `part_number` attribute is only applicable to the
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // and
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- // operations.
- // The `part_number` attribute corresponds to the `--part-number` parameter
- // of the
- // [upload-part operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
- AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
-)
-
-// AWSS3Bucket returns an attribute KeyValue conforming to the
-// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
-// request refers to. Corresponds to the `--bucket` parameter of the [S3
-// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-// operations.
-func AWSS3Bucket(val string) attribute.KeyValue {
- return AWSS3BucketKey.String(val)
-}
-
-// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
-// semantic conventions. It represents the S3 object key the request refers to.
-// Corresponds to the `--key` parameter of the [S3
-// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-// operations.
-func AWSS3Key(val string) attribute.KeyValue {
- return AWSS3KeyKey.String(val)
-}
-
-// AWSS3CopySource returns an attribute KeyValue conforming to the
-// "aws.s3.copy_source" semantic conventions. It represents the source object
-// (in the form `bucket`/`key`) for the copy operation. 
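// [Editor's note] A usage sketch, not part of the original file: a
// copy-object span could be annotated as follows, assuming `span` is a
// trace.Span.
//
//	span.SetAttributes(
//		AWSS3Bucket("some-bucket-name"),
//		AWSS3Key("someFile.yml"),
//		AWSS3CopySource("source-bucket/someFile.yml"),
//	)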
-func AWSS3CopySource(val string) attribute.KeyValue { - return AWSS3CopySourceKey.String(val) -} - -// AWSS3UploadID returns an attribute KeyValue conforming to the -// "aws.s3.upload_id" semantic conventions. It represents the upload ID that -// identifies the multipart upload. -func AWSS3UploadID(val string) attribute.KeyValue { - return AWSS3UploadIDKey.String(val) -} - -// AWSS3Delete returns an attribute KeyValue conforming to the -// "aws.s3.delete" semantic conventions. It represents the delete request -// container that specifies the objects to be deleted. -func AWSS3Delete(val string) attribute.KeyValue { - return AWSS3DeleteKey.String(val) -} - -// AWSS3PartNumber returns an attribute KeyValue conforming to the -// "aws.s3.part_number" semantic conventions. It represents the part number of -// the part being uploaded in a multipart-upload operation. This is a positive -// integer between 1 and 10,000. -func AWSS3PartNumber(val int) attribute.KeyValue { - return AWSS3PartNumberKey.Int(val) -} - -// Semantic conventions to apply when instrumenting the GraphQL implementation. -// They map GraphQL operations to attributes on a Span. -const ( - // GraphqlOperationNameKey is the attribute Key conforming to the - // "graphql.operation.name" semantic conventions. It represents the name of - // the operation being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'findBookByID' - GraphqlOperationNameKey = attribute.Key("graphql.operation.name") - - // GraphqlOperationTypeKey is the attribute Key conforming to the - // "graphql.operation.type" semantic conventions. It represents the type of - // the operation being executed. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'query', 'mutation', 'subscription' - GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") - - // GraphqlDocumentKey is the attribute Key conforming to the - // "graphql.document" semantic conventions. It represents the GraphQL - // document being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'query findBookByID { bookByID(id: ?) { name } }' - // Note: The value may be sanitized to exclude sensitive information. - GraphqlDocumentKey = attribute.Key("graphql.document") -) - -var ( - // GraphQL query - GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") - // GraphQL mutation - GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") - // GraphQL subscription - GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") -) - -// GraphqlOperationName returns an attribute KeyValue conforming to the -// "graphql.operation.name" semantic conventions. It represents the name of the -// operation being executed. -func GraphqlOperationName(val string) attribute.KeyValue { - return GraphqlOperationNameKey.String(val) -} - -// GraphqlDocument returns an attribute KeyValue conforming to the -// "graphql.document" semantic conventions. It represents the GraphQL document -// being executed. -func GraphqlDocument(val string) attribute.KeyValue { - return GraphqlDocumentKey.String(val) -} - -// General attributes used in messaging systems. -const ( - // MessagingSystemKey is the attribute Key conforming to the - // "messaging.system" semantic conventions. It represents a string - // identifying the messaging system. 
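// [Editor's note] A usage sketch, not part of the original file: a Kafka
// publish span could identify the messaging system and operation, assuming
// `span` is a trace.Span.
//
//	span.SetAttributes(
//		MessagingSystem("kafka"),
//		MessagingOperationPublish,
//	)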
- // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' - MessagingSystemKey = attribute.Key("messaging.system") - - // MessagingOperationKey is the attribute Key conforming to the - // "messaging.operation" semantic conventions. It represents a string - // identifying the kind of messaging operation as defined in the [Operation - // names](#operation-names) section above. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - // Note: If a custom value is used, it MUST be of low cardinality. - MessagingOperationKey = attribute.Key("messaging.operation") - - // MessagingBatchMessageCountKey is the attribute Key conforming to the - // "messaging.batch.message_count" semantic conventions. It represents the - // number of messages sent, received, or processed in the scope of the - // batching operation. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If the span describes an - // operation on a batch of messages.) - // Stability: stable - // Examples: 0, 1, 2 - // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on - // spans that operate with a single message. When a messaging client - // library supports both batch and single-message API for the same - // operation, instrumentations SHOULD use `messaging.batch.message_count` - // for batching APIs and SHOULD NOT use it for single-message APIs. - MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") -) - -var ( - // publish - MessagingOperationPublish = MessagingOperationKey.String("publish") - // receive - MessagingOperationReceive = MessagingOperationKey.String("receive") - // process - MessagingOperationProcess = MessagingOperationKey.String("process") -) - -// MessagingSystem returns an attribute KeyValue conforming to the -// "messaging.system" semantic conventions. It represents a string identifying -// the messaging system. -func MessagingSystem(val string) attribute.KeyValue { - return MessagingSystemKey.String(val) -} - -// MessagingBatchMessageCount returns an attribute KeyValue conforming to -// the "messaging.batch.message_count" semantic conventions. It represents the -// number of messages sent, received, or processed in the scope of the batching -// operation. -func MessagingBatchMessageCount(val int) attribute.KeyValue { - return MessagingBatchMessageCountKey.Int(val) -} - -// Semantic convention for a consumer of messages received from a messaging -// system -const ( - // MessagingConsumerIDKey is the attribute Key conforming to the - // "messaging.consumer.id" semantic conventions. It represents the - // identifier for the consumer receiving a message. For Kafka, set it to - // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if - // both are present, or only `messaging.kafka.consumer.group`. For brokers, - // such as RabbitMQ and Artemis, set it to the `client_id` of the client - // consuming the message. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'mygroup - client-6' - MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") -) - -// MessagingConsumerID returns an attribute KeyValue conforming to the -// "messaging.consumer.id" semantic conventions. It represents the identifier -// for the consumer receiving a message. 
For Kafka, set it to -// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both -// are present, or only `messaging.kafka.consumer.group`. For brokers, such as -// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the -// message. -func MessagingConsumerID(val string) attribute.KeyValue { - return MessagingConsumerIDKey.String(val) -} - -// Semantic conventions for remote procedure calls. -const ( - // RPCSystemKey is the attribute Key conforming to the "rpc.system" - // semantic conventions. It represents a string identifying the remoting - // system. See below for a list of well-known identifiers. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - RPCSystemKey = attribute.Key("rpc.system") - - // RPCServiceKey is the attribute Key conforming to the "rpc.service" - // semantic conventions. It represents the full (logical) name of the - // service being called, including its package name, if applicable. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'myservice.EchoService' - // Note: This is the logical name of the service from the RPC interface - // perspective, which can be different from the name of any implementing - // class. The `code.namespace` attribute may be used to store the latter - // (despite the attribute name, it may include a class name; e.g., class - // with method actually executing the call on the server side, RPC client - // stub class on the client side). - RPCServiceKey = attribute.Key("rpc.service") - - // RPCMethodKey is the attribute Key conforming to the "rpc.method" - // semantic conventions. It represents the name of the (logical) method - // being called, must be equal to the $method part in the span name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'exampleMethod' - // Note: This is the logical name of the method from the RPC interface - // perspective, which can be different from the name of any implementing - // method/function. The `code.function` attribute may be used to store the - // latter (e.g., method actually executing the call on the server side, RPC - // client stub method on the client side). - RPCMethodKey = attribute.Key("rpc.method") -) - -var ( - // gRPC - RPCSystemGRPC = RPCSystemKey.String("grpc") - // Java RMI - RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") - // .NET WCF - RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") - // Apache Dubbo - RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") - // Connect RPC - RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") -) - -// RPCService returns an attribute KeyValue conforming to the "rpc.service" -// semantic conventions. It represents the full (logical) name of the service -// being called, including its package name, if applicable. -func RPCService(val string) attribute.KeyValue { - return RPCServiceKey.String(val) -} - -// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" -// semantic conventions. It represents the name of the (logical) method being -// called, must be equal to the $method part in the span name. -func RPCMethod(val string) attribute.KeyValue { - return RPCMethodKey.String(val) -} - -// Tech-specific attributes for gRPC. -const ( - // RPCGRPCStatusCodeKey is the attribute Key conforming to the - // "rpc.grpc.status_code" semantic conventions. 
It represents the [numeric - // status - // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of - // the gRPC request. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") -) - -var ( - // OK - RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) - // CANCELLED - RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) - // UNKNOWN - RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) - // INVALID_ARGUMENT - RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) - // DEADLINE_EXCEEDED - RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) - // NOT_FOUND - RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) - // ALREADY_EXISTS - RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) - // PERMISSION_DENIED - RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) - // RESOURCE_EXHAUSTED - RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) - // FAILED_PRECONDITION - RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) - // ABORTED - RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) - // OUT_OF_RANGE - RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) - // UNIMPLEMENTED - RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) - // INTERNAL - RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) - // UNAVAILABLE - RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) - // DATA_LOSS - RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) - // UNAUTHENTICATED - RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) -) - -// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). -const ( - // RPCJsonrpcVersionKey is the attribute Key conforming to the - // "rpc.jsonrpc.version" semantic conventions. It represents the protocol - // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 - // does not specify this, the value can be omitted. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If other than the default - // version (`1.0`)) - // Stability: stable - // Examples: '2.0', '1.0' - RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") - - // RPCJsonrpcRequestIDKey is the attribute Key conforming to the - // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` - // property of request or response. Since protocol allows id to be int, - // string, `null` or missing (for notifications), value is expected to be - // cast to string for simplicity. Use empty string in case of `null` value. - // Omit entirely if this is a notification. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10', 'request-7', '' - RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") - - // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_code" semantic conventions. It represents the - // `error.code` property of response if it is an error response. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If response is not successful.) - // Stability: stable - // Examples: -32700, 100 - RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") - - // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_message" semantic conventions. It represents the - // `error.message` property of response if it is an error response. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Parse error', 'User already exists' - RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") -) - -// RPCJsonrpcVersion returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.version" semantic conventions. It represents the protocol -// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 -// does not specify this, the value can be omitted. -func RPCJsonrpcVersion(val string) attribute.KeyValue { - return RPCJsonrpcVersionKey.String(val) -} - -// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` -// property of request or response. Since protocol allows id to be int, string, -// `null` or missing (for notifications), value is expected to be cast to -// string for simplicity. Use empty string in case of `null` value. Omit -// entirely if this is a notification. -func RPCJsonrpcRequestID(val string) attribute.KeyValue { - return RPCJsonrpcRequestIDKey.String(val) -} - -// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_code" semantic conventions. It represents the -// `error.code` property of response if it is an error response. -func RPCJsonrpcErrorCode(val int) attribute.KeyValue { - return RPCJsonrpcErrorCodeKey.Int(val) -} - -// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_message" semantic conventions. It represents the -// `error.message` property of response if it is an error response. -func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { - return RPCJsonrpcErrorMessageKey.String(val) -} - -// Tech-specific attributes for Connect RPC. -const ( - // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the - // "rpc.connect_rpc.error_code" semantic conventions. It represents the - // [error codes](https://connect.build/docs/protocol/#error-codes) of the - // Connect request. Error codes are always string values. - // - // Type: Enum - // RequirementLevel: ConditionallyRequired (If response is not successful - // and if error code available.) 
- // Stability: stable - RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") -) - -var ( - // cancelled - RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") - // unknown - RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") - // invalid_argument - RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") - // deadline_exceeded - RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") - // not_found - RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") - // already_exists - RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") - // permission_denied - RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") - // resource_exhausted - RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") - // failed_precondition - RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") - // aborted - RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") - // out_of_range - RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") - // unimplemented - RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") - // internal - RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") - // unavailable - RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") - // data_loss - RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") - // unauthenticated - RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") -) diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go index 16e1aa7ab47ec..9d3955bd73327 100644 --- a/vendor/golang.org/x/mod/module/module.go +++ b/vendor/golang.org/x/mod/module/module.go @@ -261,7 +261,7 @@ func modPathOK(r rune) bool { // importPathOK reports whether r can appear in a package import path element. // -// Import paths are intermediate between module paths and file paths: we allow +// Import paths are intermediate between module paths and file paths: we // disallow characters that would be confusing or ambiguous as arguments to // 'go get' (such as '@' and ' ' ), but allow certain characters that are // otherwise-unambiguous on the command line and historically used for some diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go index 628f8fd687c90..824b282c8308a 100644 --- a/vendor/golang.org/x/mod/semver/semver.go +++ b/vendor/golang.org/x/mod/semver/semver.go @@ -45,8 +45,8 @@ func IsValid(v string) bool { // Canonical returns the canonical formatting of the semantic version v. // It fills in any missing .MINOR or .PATCH and discards build metadata. -// Two semantic versions compare equal only if their canonical formattings -// are identical strings. +// Two semantic versions compare equal only if their canonical forms +// are identical strings. // The canonical invalid semantic version is the empty string.
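Since the Canonical doc comment above is terse, here is a short sketch of the documented behavior of golang.org/x/mod/semver (real API; the version strings shown are just examples):

```go
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	// Canonical fills in missing .MINOR/.PATCH and drops build metadata.
	fmt.Println(semver.Canonical("v1.2"))        // "v1.2.0"
	fmt.Println(semver.Canonical("v1.2.3+meta")) // "v1.2.3"
	fmt.Println(semver.Canonical("1.2.3"))       // "" (invalid: missing leading "v")

	// Two versions compare equal exactly when their canonical forms match.
	fmt.Println(semver.Compare("v1.2", "v1.2.0")) // 0
}
```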
func Canonical(v string) string { p, ok := parse(v) diff --git a/vendor/golang.org/x/tools/go/ast/inspector/cursor.go b/vendor/golang.org/x/tools/go/ast/inspector/cursor.go index 7e72d3c284b8a..fc9bbc714c633 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/cursor.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/cursor.go @@ -467,7 +467,9 @@ func (c Cursor) FindByPos(start, end token.Pos) (Cursor, bool) { // This algorithm could be implemented using c.Inspect, // but it is about 2.5x slower. - best := int32(-1) // push index of latest (=innermost) node containing range + // best is the push-index of the latest (=innermost) node containing range. + // (Beware: latest is not always innermost because FuncDecl.{Name,Type} overlap.) + best := int32(-1) for i, limit := c.indices(); i < limit; i++ { ev := events[i] if ev.index > i { // push? @@ -481,6 +483,19 @@ func (c Cursor) FindByPos(start, end token.Pos) (Cursor, bool) { continue } } else { + // Edge case: FuncDecl.Name and .Type overlap: + // Don't update best from Name to FuncDecl.Type. + // + // The condition can be read as: + // - n is FuncType + // - n.parent is FuncDecl + // - best is strictly beneath the FuncDecl + if ev.typ == 1<<nFuncType && + events[ev.parent].typ == 1<<nFuncDecl && + best > ev.parent { + continue + } + nodeEnd = n.End() if n.Pos() > start { break // disjoint, after; stop diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go index af6a60d75f87f..c546b1b63e365 100644 --- a/vendor/golang.org/x/tools/go/packages/visit.go +++ b/vendor/golang.org/x/tools/go/packages/visit.go @@ -78,7 +78,7 @@ func PrintErrors(pkgs []*Package) int { return n } -// Postorder returns an iterator over the the packages in +// Postorder returns an iterator over the packages in // the import graph whose roots are pkg. // Packages are enumerated in dependencies-first order. func Postorder(pkgs []*Package) iter.Seq[*Package] { diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index 6c0c74968f353..6646bf5508908 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -249,7 +249,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { case *types.Func: // A func, if not package-level, must be a method.
- if recv := obj.Type().(*types.Signature).Recv(); recv == nil { + if recv := obj.Signature().Recv(); recv == nil { return "", fmt.Errorf("func is not a method: %v", obj) } @@ -405,7 +405,7 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { return "", false } - _, named := typesinternal.ReceiverNamed(meth.Type().(*types.Signature).Recv()) + _, named := typesinternal.ReceiverNamed(meth.Signature().Recv()) if named == nil { return "", false } diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go index f035a0b6be92c..36624572a6639 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/map.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -304,8 +304,7 @@ func (h hasher) hash(t types.Type) uint32 { case *types.Named: hash := h.hashTypeName(t.Obj()) targs := t.TypeArgs() - for i := 0; i < targs.Len(); i++ { - targ := targs.At(i) + for targ := range targs.Types() { hash += 2 * h.hash(targ) } return hash diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go index 734c46198dfb7..555ef626c0074 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -34,7 +34,7 @@ type fileInfo struct { const maxlines = 64 * 1024 func (s *fakeFileSet) pos(file string, line, column int) token.Pos { - // TODO(mdempsky): Make use of column. + _ = column // TODO(mdempsky): Make use of column. // Since we don't know the set of needed file positions, we reserve maxlines // positions per file. We delay calling token.File.SetLines until all diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index 4a4357d2bd444..2bef2b058ba55 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -829,8 +829,7 @@ func (p *iexporter) doDecl(obj types.Object) { // their name must be qualified before exporting recv. if rparams := sig.RecvTypeParams(); rparams.Len() > 0 { prefix := obj.Name() + "." + m.Name() - for i := 0; i < rparams.Len(); i++ { - rparam := rparams.At(i) + for rparam := range rparams.TypeParams() { name := tparamExportName(prefix, rparam) w.p.tparamNames[rparam.Obj()] = name } @@ -944,6 +943,13 @@ func (w *exportWriter) posV0(pos token.Pos) { } func (w *exportWriter) pkg(pkg *types.Package) { + if pkg == nil { + // [exportWriter.typ] accepts a nil pkg only for types + // of constants, which cannot contain named objects + // such as fields or methods and thus should never + // reach this method (#76222). + panic("nil package") + } // Ensure any referenced packages are declared in the main index. w.p.allPkgs[pkg] = true @@ -959,9 +965,11 @@ func (w *exportWriter) qualifiedType(obj *types.TypeName) { w.pkg(obj.Pkg()) } -// TODO(rfindley): what does 'pkg' even mean here? It would be better to pass -// it in explicitly into signatures and structs that may use it for -// constructing fields. +// typ emits the specified type. +// +// Objects within the type (struct fields and interface methods) are +// qualified by pkg. It may be nil if the type cannot contain objects, +// such as the type of a constant. 
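The nil-pkg rule documented in the typ comment above leans on a property of Go's type system: a constant's type is always basic-kinded, so it can never contain named objects (struct fields, interface methods) that would need a qualifying package. A small illustrative go/types sketch of that property (not part of the patch):

```go
package main

import (
	"fmt"
	"go/constant"
	"go/token"
	"go/types"
)

func main() {
	// A declared constant's type is always *types.Basic (or a named type
	// whose underlying type is basic), so it can never contain named
	// objects such as struct fields or interface methods.
	c := types.NewConst(token.NoPos, nil, "answer",
		types.Typ[types.Int], constant.MakeInt64(42))

	_, isBasic := c.Type().Underlying().(*types.Basic)
	fmt.Println(c.Type(), isBasic) // int true
}
```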
func (w *exportWriter) typ(t types.Type, pkg *types.Package) { w.data.uint64(w.p.typOff(t, pkg)) } @@ -991,6 +999,7 @@ func (w *exportWriter) startType(k itag) { w.data.uint64(uint64(k)) } +// doTyp is the implementation of [exportWriter.typ]. func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { if trace { w.p.trace("exporting type %s (%T)", t, t) @@ -1064,7 +1073,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { case *types.Signature: w.startType(signatureType) - w.pkg(pkg) + w.pkg(pkg) // qualifies param/result vars w.signature(t) case *types.Struct: @@ -1110,19 +1119,19 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { case *types.Interface: w.startType(interfaceType) - w.pkg(pkg) + w.pkg(pkg) // qualifies unexported method funcs n := t.NumEmbeddeds() w.uint64(uint64(n)) for i := 0; i < n; i++ { ft := t.EmbeddedType(i) - tPkg := pkg if named, _ := types.Unalias(ft).(*types.Named); named != nil { w.pos(named.Obj().Pos()) } else { + // e.g. ~int w.pos(token.NoPos) } - w.typ(ft, tPkg) + w.typ(ft, pkg) } // See comment for struct fields. In shallow mode we change the encoding @@ -1223,20 +1232,19 @@ func (w *exportWriter) signature(sig *types.Signature) { func (w *exportWriter) typeList(ts *types.TypeList, pkg *types.Package) { w.uint64(uint64(ts.Len())) - for i := 0; i < ts.Len(); i++ { - w.typ(ts.At(i), pkg) + for t := range ts.Types() { + w.typ(t, pkg) } } func (w *exportWriter) tparamList(prefix string, list *types.TypeParamList, pkg *types.Package) { ll := uint64(list.Len()) w.uint64(ll) - for i := 0; i < list.Len(); i++ { - tparam := list.At(i) + for tparam := range list.TypeParams() { // Set the type parameter exportName before exporting its type. exportName := tparamExportName(prefix, tparam) w.p.tparamNames[tparam.Obj()] = exportName - w.typ(list.At(i), pkg) + w.typ(tparam, pkg) } } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 82e6c9d2dc16f..4d6d50094a0e0 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -432,10 +432,10 @@ func (p *iimporter) doDecl(pkg *types.Package, name string) { errorf("%v.%v not in index", pkg, name) } - r := &importReader{p: p, currPkg: pkg} + r := &importReader{p: p} r.declReader.Reset(p.declData[off:]) - r.obj(name) + r.obj(pkg, name) } func (p *iimporter) stringAt(off uint64) string { @@ -551,7 +551,6 @@ func canReuse(def *types.Named, rhs types.Type) bool { type importReader struct { p *iimporter declReader bytes.Reader - currPkg *types.Package prevFile string prevLine int64 prevColumn int64 @@ -565,7 +564,8 @@ type importReader struct { // for 1.24, but the fix was not worth back-porting). var markBlack = func(name *types.TypeName) {} -func (r *importReader) obj(name string) { +// obj decodes and declares the package-level object denoted by (pkg, name). 
+func (r *importReader) obj(pkg *types.Package, name string) { tag := r.byte() pos := r.pos() @@ -576,27 +576,27 @@ func (r *importReader) obj(name string) { tparams = r.tparamList() } typ := r.typ() - obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams) + obj := aliases.NewAlias(r.p.aliases, pos, pkg, name, typ, tparams) markBlack(obj) // workaround for golang/go#69912 r.declare(obj) case constTag: typ, val := r.value() - r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) + r.declare(types.NewConst(pos, pkg, name, typ, val)) case funcTag, genericFuncTag: var tparams []*types.TypeParam if tag == genericFuncTag { tparams = r.tparamList() } - sig := r.signature(nil, nil, tparams) - r.declare(types.NewFunc(pos, r.currPkg, name, sig)) + sig := r.signature(pkg, nil, nil, tparams) + r.declare(types.NewFunc(pos, pkg, name, sig)) case typeTag, genericTypeTag: // Types can be recursive. We need to setup a stub // declaration before recursing. - obj := types.NewTypeName(pos, r.currPkg, name, nil) + obj := types.NewTypeName(pos, pkg, name, nil) named := types.NewNamed(obj, nil, nil) markBlack(obj) // workaround for golang/go#69912 @@ -616,7 +616,7 @@ func (r *importReader) obj(name string) { for n := r.uint64(); n > 0; n-- { mpos := r.pos() mname := r.ident() - recv := r.param() + recv := r.param(pkg) // If the receiver has any targs, set those as the // rparams of the method (since those are the @@ -630,9 +630,9 @@ func (r *importReader) obj(name string) { rparams[i] = types.Unalias(targs.At(i)).(*types.TypeParam) } } - msig := r.signature(recv, rparams, nil) + msig := r.signature(pkg, recv, rparams, nil) - named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig)) + named.AddMethod(types.NewFunc(mpos, pkg, mname, msig)) } } @@ -644,12 +644,12 @@ func (r *importReader) obj(name string) { errorf("unexpected type param type") } name0 := tparamName(name) - tn := types.NewTypeName(pos, r.currPkg, name0, nil) + tn := types.NewTypeName(pos, pkg, name0, nil) t := types.NewTypeParam(tn, nil) // To handle recursive references to the typeparam within its // bound, save the partial type in tparamIndex before reading the bounds. - id := ident{r.currPkg, name} + id := ident{pkg, name} r.p.tparamIndex[id] = t var implicit bool if r.p.version >= iexportVersionGo1_18 { @@ -672,7 +672,7 @@ func (r *importReader) obj(name string) { case varTag: typ := r.typ() - v := types.NewVar(pos, r.currPkg, name, typ) + v := types.NewVar(pos, pkg, name, typ) typesinternal.SetVarKind(v, typesinternal.PackageVar) r.declare(v) @@ -905,11 +905,11 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { case mapType: return types.NewMap(r.typ(), r.typ()) case signatureType: - r.currPkg = r.pkg() - return r.signature(nil, nil, nil) + paramPkg := r.pkg() + return r.signature(paramPkg, nil, nil, nil) case structType: - r.currPkg = r.pkg() + fieldPkg := r.pkg() fields := make([]*types.Var, r.uint64()) tags := make([]string, len(fields)) @@ -932,7 +932,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { // discussed in iexport.go, this is not correct, but mostly works and is // preferable to failing (for now at least). 
if field == nil { - field = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + field = types.NewField(fpos, fieldPkg, fname, ftyp, emb) } fields[i] = field @@ -941,7 +941,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { return types.NewStruct(fields, tags) case interfaceType: - r.currPkg = r.pkg() + methodPkg := r.pkg() // qualifies methods and their param/result vars embeddeds := make([]types.Type, r.uint64()) for i := range embeddeds { @@ -963,12 +963,12 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { // don't agree with this. var recv *types.Var if base != nil { - recv = types.NewVar(token.NoPos, r.currPkg, "", base) + recv = types.NewVar(token.NoPos, methodPkg, "", base) } - msig := r.signature(recv, nil, nil) + msig := r.signature(methodPkg, recv, nil, nil) if method == nil { - method = types.NewFunc(mpos, r.currPkg, mname, msig) + method = types.NewFunc(mpos, methodPkg, mname, msig) } methods[i] = method } @@ -1049,9 +1049,9 @@ func (r *importReader) objectPathObject() types.Object { return obj } -func (r *importReader) signature(recv *types.Var, rparams []*types.TypeParam, tparams []*types.TypeParam) *types.Signature { - params := r.paramList() - results := r.paramList() +func (r *importReader) signature(paramPkg *types.Package, recv *types.Var, rparams []*types.TypeParam, tparams []*types.TypeParam) *types.Signature { + params := r.paramList(paramPkg) + results := r.paramList(paramPkg) variadic := params.Len() > 0 && r.bool() return types.NewSignatureType(recv, rparams, tparams, params, results, variadic) } @@ -1070,19 +1070,19 @@ func (r *importReader) tparamList() []*types.TypeParam { return xs } -func (r *importReader) paramList() *types.Tuple { +func (r *importReader) paramList(pkg *types.Package) *types.Tuple { xs := make([]*types.Var, r.uint64()) for i := range xs { - xs[i] = r.param() + xs[i] = r.param(pkg) } return types.NewTuple(xs...) 
} -func (r *importReader) param() *types.Var { +func (r *importReader) param(pkg *types.Package) *types.Var { pos := r.pos() name := r.ident() typ := r.typ() - return types.NewParam(pos, r.currPkg, name, typ) + return types.NewParam(pos, pkg, name, typ) } func (r *importReader) bool() bool { diff --git a/vendor/golang.org/x/tools/internal/stdlib/deps.go b/vendor/golang.org/x/tools/internal/stdlib/deps.go index 96ad6c582105e..581784da4351b 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/deps.go +++ b/vendor/golang.org/x/tools/internal/stdlib/deps.go @@ -12,354 +12,508 @@ type pkginfo struct { } var deps = [...]pkginfo{ - {"archive/tar", "\x03k\x03E;\x01\n\x01$\x01\x01\x02\x05\b\x02\x01\x02\x02\f"}, - {"archive/zip", "\x02\x04a\a\x03\x12\x021;\x01+\x05\x01\x0f\x03\x02\x0e\x04"}, - {"bufio", "\x03k\x83\x01D\x14"}, - {"bytes", "n*Y\x03\fG\x02\x02"}, + {"archive/tar", "\x03n\x03E<\x01\n\x01$\x01\x01\x02\x05\b\x02\x01\x02\x02\f"}, + {"archive/zip", "\x02\x04d\a\x03\x12\x021<\x01+\x05\x01\x0f\x03\x02\x0e\x04"}, + {"bufio", "\x03n\x84\x01D\x14"}, + {"bytes", "q*Z\x03\fG\x02\x02"}, {"cmp", ""}, - {"compress/bzip2", "\x02\x02\xed\x01A"}, - {"compress/flate", "\x02l\x03\x80\x01\f\x033\x01\x03"}, - {"compress/gzip", "\x02\x04a\a\x03\x14lT"}, - {"compress/lzw", "\x02l\x03\x80\x01"}, - {"compress/zlib", "\x02\x04a\a\x03\x12\x01m"}, - {"container/heap", "\xb3\x02"}, + {"compress/bzip2", "\x02\x02\xf1\x01A"}, + {"compress/flate", "\x02o\x03\x81\x01\f\x033\x01\x03"}, + {"compress/gzip", "\x02\x04d\a\x03\x14mT"}, + {"compress/lzw", "\x02o\x03\x81\x01"}, + {"compress/zlib", "\x02\x04d\a\x03\x12\x01n"}, + {"container/heap", "\xb7\x02"}, {"container/list", ""}, {"container/ring", ""}, - {"context", "n\\m\x01\r"}, - {"crypto", "\x83\x01nC"}, - {"crypto/aes", "\x10\n\a\x93\x02"}, - {"crypto/cipher", "\x03\x1e\x01\x01\x1e\x11\x1c+X"}, - {"crypto/des", "\x10\x13\x1e-+\x9b\x01\x03"}, - {"crypto/dsa", "A\x04)\x83\x01\r"}, - {"crypto/ecdh", "\x03\v\f\x0e\x04\x15\x04\r\x1c\x83\x01"}, - {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x0e\a\v\x05\x01\x04\f\x01\x1c\x83\x01\r\x05K\x01"}, - {"crypto/ed25519", "\x0e\x1c\x11\x06\n\a\x1c\x83\x01C"}, - {"crypto/elliptic", "0>\x83\x01\r9"}, - {"crypto/fips140", " \x05"}, - {"crypto/hkdf", "-\x13\x01-\x15"}, - {"crypto/hmac", "\x1a\x14\x12\x01\x111"}, - {"crypto/internal/boring", "\x0e\x02\rf"}, - {"crypto/internal/boring/bbig", "\x1a\xe4\x01M"}, - {"crypto/internal/boring/bcache", "\xb8\x02\x13"}, + {"context", "q[o\x01\r"}, + {"crypto", "\x86\x01oC"}, + {"crypto/aes", "\x10\n\t\x95\x02"}, + {"crypto/cipher", "\x03 \x01\x01\x1f\x11\x1c+Y"}, + {"crypto/des", "\x10\x15\x1f-+\x9c\x01\x03"}, + {"crypto/dsa", "D\x04)\x84\x01\r"}, + {"crypto/ecdh", "\x03\v\f\x10\x04\x16\x04\r\x1c\x84\x01"}, + {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x10\a\v\x06\x01\x04\f\x01\x1c\x84\x01\r\x05K\x01"}, + {"crypto/ed25519", "\x0e\x1e\x11\a\n\a\x1c\x84\x01C"}, + {"crypto/elliptic", "2?\x84\x01\r9"}, + {"crypto/fips140", "\"\x05"}, + {"crypto/hkdf", "/\x14\x01-\x15"}, + {"crypto/hmac", "\x1a\x16\x13\x01\x111"}, + {"crypto/internal/boring", "\x0e\x02\ri"}, + {"crypto/internal/boring/bbig", "\x1a\xe8\x01M"}, + {"crypto/internal/boring/bcache", "\xbc\x02\x13"}, {"crypto/internal/boring/sig", ""}, - {"crypto/internal/cryptotest", "\x03\r\n\x06$\x0e\x19\x06\x12\x12 \x04\a\t\x16\x01\x11\x11\x1b\x01\a\x05\b\x03\x05\v"}, - {"crypto/internal/entropy", "F"}, - {"crypto/internal/fips140", "?/\x15\xa7\x01\v\x16"}, - {"crypto/internal/fips140/aes", "\x03\x1d\x03\x02\x13\x05\x01\x01\x05*\x92\x014"}, - 
{"crypto/internal/fips140/aes/gcm", " \x01\x02\x02\x02\x11\x05\x01\x06*\x8f\x01"}, - {"crypto/internal/fips140/alias", "\xcb\x02"}, - {"crypto/internal/fips140/bigmod", "%\x18\x01\x06*\x92\x01"}, - {"crypto/internal/fips140/check", " \x0e\x06\t\x02\xb2\x01Z"}, - {"crypto/internal/fips140/check/checktest", "%\x85\x02!"}, - {"crypto/internal/fips140/drbg", "\x03\x1c\x01\x01\x04\x13\x05\b\x01(\x83\x01\x0f7"}, - {"crypto/internal/fips140/ecdh", "\x03\x1d\x05\x02\t\r1\x83\x01\x0f7"}, - {"crypto/internal/fips140/ecdsa", "\x03\x1d\x04\x01\x02\a\x02\x068\x15nF"}, - {"crypto/internal/fips140/ed25519", "\x03\x1d\x05\x02\x04\v8\xc6\x01\x03"}, - {"crypto/internal/fips140/edwards25519", "%\a\f\x051\x92\x017"}, - {"crypto/internal/fips140/edwards25519/field", "%\x13\x051\x92\x01"}, - {"crypto/internal/fips140/hkdf", "\x03\x1d\x05\t\x06:\x15"}, - {"crypto/internal/fips140/hmac", "\x03\x1d\x14\x01\x018\x15"}, - {"crypto/internal/fips140/mlkem", "\x03\x1d\x05\x02\x0e\x03\x051"}, - {"crypto/internal/fips140/nistec", "%\f\a\x051\x92\x01*\r\x14"}, - {"crypto/internal/fips140/nistec/fiat", "%\x136\x92\x01"}, - {"crypto/internal/fips140/pbkdf2", "\x03\x1d\x05\t\x06:\x15"}, - {"crypto/internal/fips140/rsa", "\x03\x1d\x04\x01\x02\r\x01\x01\x026\x15nF"}, - {"crypto/internal/fips140/sha256", "\x03\x1d\x1d\x01\x06*\x15}"}, - {"crypto/internal/fips140/sha3", "\x03\x1d\x18\x05\x010\x92\x01K"}, - {"crypto/internal/fips140/sha512", "\x03\x1d\x1d\x01\x06*\x15}"}, - {"crypto/internal/fips140/ssh", "%^"}, - {"crypto/internal/fips140/subtle", "#\x1a\xc3\x01"}, - {"crypto/internal/fips140/tls12", "\x03\x1d\x05\t\x06\x028\x15"}, - {"crypto/internal/fips140/tls13", "\x03\x1d\x05\b\a\t1\x15"}, - {"crypto/internal/fips140cache", "\xaa\x02\r&"}, + {"crypto/internal/constanttime", ""}, + {"crypto/internal/cryptotest", "\x03\r\n\b%\x0e\x19\x06\x12\x12 \x04\x06\t\x18\x01\x11\x11\x1b\x01\a\x05\b\x03\x05\v"}, + {"crypto/internal/entropy", "I"}, + {"crypto/internal/entropy/v1.0.0", "B/\x93\x018\x13"}, + {"crypto/internal/fips140", "A0\xbd\x01\v\x16"}, + {"crypto/internal/fips140/aes", "\x03\x1f\x03\x02\x13\x05\x01\x01\x06*\x93\x014"}, + {"crypto/internal/fips140/aes/gcm", "\"\x01\x02\x02\x02\x11\x05\x01\a*\x90\x01"}, + {"crypto/internal/fips140/alias", "\xcf\x02"}, + {"crypto/internal/fips140/bigmod", "'\x18\x01\a*\x93\x01"}, + {"crypto/internal/fips140/check", "\"\x0e\x06\t\x02\xb4\x01Z"}, + {"crypto/internal/fips140/check/checktest", "'\x87\x02!"}, + {"crypto/internal/fips140/drbg", "\x03\x1e\x01\x01\x04\x13\x05\t\x01(\x84\x01\x0f7\x01"}, + {"crypto/internal/fips140/ecdh", "\x03\x1f\x05\x02\t\r2\x84\x01\x0f7"}, + {"crypto/internal/fips140/ecdsa", "\x03\x1f\x04\x01\x02\a\x02\x069\x15oF"}, + {"crypto/internal/fips140/ed25519", "\x03\x1f\x05\x02\x04\v9\xc7\x01\x03"}, + {"crypto/internal/fips140/edwards25519", "\x1e\t\a\x112\x93\x017"}, + {"crypto/internal/fips140/edwards25519/field", "'\x13\x052\x93\x01"}, + {"crypto/internal/fips140/hkdf", "\x03\x1f\x05\t\x06;\x15"}, + {"crypto/internal/fips140/hmac", "\x03\x1f\x14\x01\x019\x15"}, + {"crypto/internal/fips140/mlkem", "\x03\x1f\x05\x02\x0e\x03\x052\xca\x01"}, + {"crypto/internal/fips140/nistec", "\x1e\t\f\f2\x93\x01*\r\x14"}, + {"crypto/internal/fips140/nistec/fiat", "'\x137\x93\x01"}, + {"crypto/internal/fips140/pbkdf2", "\x03\x1f\x05\t\x06;\x15"}, + {"crypto/internal/fips140/rsa", "\x03\x1b\x04\x04\x01\x02\r\x01\x01\x027\x15oF"}, + {"crypto/internal/fips140/sha256", "\x03\x1f\x1d\x01\a*\x15~"}, + {"crypto/internal/fips140/sha3", "\x03\x1f\x18\x05\x011\x93\x01K"}, + 
{"crypto/internal/fips140/sha512", "\x03\x1f\x1d\x01\a*\x15~"}, + {"crypto/internal/fips140/ssh", "'_"}, + {"crypto/internal/fips140/subtle", "\x1e\a\x1a\xc5\x01"}, + {"crypto/internal/fips140/tls12", "\x03\x1f\x05\t\x06\x029\x15"}, + {"crypto/internal/fips140/tls13", "\x03\x1f\x05\b\a\t2\x15"}, + {"crypto/internal/fips140cache", "\xae\x02\r&"}, {"crypto/internal/fips140deps", ""}, - {"crypto/internal/fips140deps/byteorder", "\x99\x01"}, - {"crypto/internal/fips140deps/cpu", "\xae\x01\a"}, - {"crypto/internal/fips140deps/godebug", "\xb6\x01"}, - {"crypto/internal/fips140hash", "5\x1b3\xc8\x01"}, - {"crypto/internal/fips140only", "'\r\x01\x01M3;"}, + {"crypto/internal/fips140deps/byteorder", "\x9c\x01"}, + {"crypto/internal/fips140deps/cpu", "\xb1\x01\a"}, + {"crypto/internal/fips140deps/godebug", "\xb9\x01"}, + {"crypto/internal/fips140deps/time", "\xc9\x02"}, + {"crypto/internal/fips140hash", "7\x1c3\xc9\x01"}, + {"crypto/internal/fips140only", ")\r\x01\x01N3<"}, {"crypto/internal/fips140test", ""}, - {"crypto/internal/hpke", "\x0e\x01\x01\x03\x053#+gM"}, - {"crypto/internal/impl", "\xb5\x02"}, - {"crypto/internal/randutil", "\xf1\x01\x12"}, - {"crypto/internal/sysrand", "nn! \r\r\x01\x01\f\x06"}, - {"crypto/internal/sysrand/internal/seccomp", "n"}, - {"crypto/md5", "\x0e3-\x15\x16g"}, - {"crypto/mlkem", "/"}, - {"crypto/pbkdf2", "2\x0e\x01-\x15"}, - {"crypto/rand", "\x1a\x06\a\x1a\x04\x01(\x83\x01\rM"}, - {"crypto/rc4", "#\x1e-\xc6\x01"}, - {"crypto/rsa", "\x0e\f\x01\t\x0f\r\x01\x04\x06\a\x1c\x03\x123;\f\x01"}, - {"crypto/sha1", "\x0e\f'\x03*\x15\x16\x15R"}, - {"crypto/sha256", "\x0e\f\x1aO"}, - {"crypto/sha3", "\x0e'N\xc8\x01"}, - {"crypto/sha512", "\x0e\f\x1cM"}, - {"crypto/subtle", "8\x9b\x01W"}, - {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x02\x01\x01\a\x01\r\n\x01\t\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x12\x16\x15\b;\x16\x16\r\b\x01\x01\x01\x02\x01\r\x06\x02\x01\x0f"}, - {"crypto/tls/internal/fips140tls", "\x17\xa1\x02"}, - {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x012\x05\x01\x01\x02\x05\x0e\x06\x02\x02\x03E\x038\x01\x02\b\x01\x01\x02\a\x10\x05\x01\x06\x02\x05\n\x01\x02\x0e\x02\x01\x01\x02\x03\x01"}, - {"crypto/x509/pkix", "d\x06\a\x8d\x01G"}, - {"database/sql", "\x03\nK\x16\x03\x80\x01\v\a\"\x05\b\x02\x03\x01\r\x02\x02\x02"}, - {"database/sql/driver", "\ra\x03\xb4\x01\x0f\x11"}, - {"debug/buildinfo", "\x03X\x02\x01\x01\b\a\x03e\x19\x02\x01+\x0f\x1f"}, - {"debug/dwarf", "\x03d\a\x03\x80\x011\x11\x01\x01"}, - {"debug/elf", "\x03\x06Q\r\a\x03e\x1a\x01,\x17\x01\x16"}, - {"debug/gosym", "\x03d\n\xc2\x01\x01\x01\x02"}, - {"debug/macho", "\x03\x06Q\r\ne\x1b,\x17\x01"}, - {"debug/pe", "\x03\x06Q\r\a\x03e\x1b,\x17\x01\x16"}, - {"debug/plan9obj", "g\a\x03e\x1b,"}, - {"embed", "n*@\x19\x01S"}, + {"crypto/internal/hpke", "\x0e\x01\x01\x03\x056#+hM"}, + {"crypto/internal/impl", "\xb9\x02"}, + {"crypto/internal/randutil", "\xf5\x01\x12"}, + {"crypto/internal/sysrand", "qo! 
\r\r\x01\x01\f\x06"}, + {"crypto/internal/sysrand/internal/seccomp", "q"}, + {"crypto/md5", "\x0e6-\x15\x16h"}, + {"crypto/mlkem", "1"}, + {"crypto/pbkdf2", "4\x0f\x01-\x15"}, + {"crypto/rand", "\x1a\b\a\x1b\x04\x01(\x84\x01\rM"}, + {"crypto/rc4", "%\x1f-\xc7\x01"}, + {"crypto/rsa", "\x0e\f\x01\v\x0f\x0e\x01\x04\x06\a\x1c\x03\x123<\f\x01"}, + {"crypto/sha1", "\x0e\f*\x03*\x15\x16\x15S"}, + {"crypto/sha256", "\x0e\f\x1cP"}, + {"crypto/sha3", "\x0e)O\xc9\x01"}, + {"crypto/sha512", "\x0e\f\x1eN"}, + {"crypto/subtle", "\x1e\x1c\x9c\x01X"}, + {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x02\x01\x01\t\x01\r\n\x01\n\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x12\x16\x15\b<\x16\x16\r\b\x01\x01\x01\x02\x01\r\x06\x02\x01\x0f"}, + {"crypto/tls/internal/fips140tls", "\x17\xa5\x02"}, + {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x015\x05\x01\x01\x02\x05\x0e\x06\x02\x02\x03E\x039\x01\x02\b\x01\x01\x02\a\x10\x05\x01\x06\x02\x05\b\x02\x01\x02\x0e\x02\x01\x01\x02\x03\x01"}, + {"crypto/x509/pkix", "g\x06\a\x8e\x01G"}, + {"database/sql", "\x03\nN\x16\x03\x81\x01\v\a\"\x05\b\x02\x03\x01\r\x02\x02\x02"}, + {"database/sql/driver", "\rd\x03\xb5\x01\x0f\x11"}, + {"debug/buildinfo", "\x03[\x02\x01\x01\b\a\x03e\x1a\x02\x01+\x0f\x1f"}, + {"debug/dwarf", "\x03g\a\x03\x81\x011\x11\x01\x01"}, + {"debug/elf", "\x03\x06T\r\a\x03e\x1b\x01\f \x17\x01\x16"}, + {"debug/gosym", "\x03g\n\xc3\x01\x01\x01\x02"}, + {"debug/macho", "\x03\x06T\r\ne\x1c,\x17\x01"}, + {"debug/pe", "\x03\x06T\r\a\x03e\x1c,\x17\x01\x16"}, + {"debug/plan9obj", "j\a\x03e\x1c,"}, + {"embed", "q*A\x19\x01S"}, {"embed/internal/embedtest", ""}, {"encoding", ""}, - {"encoding/ascii85", "\xf1\x01C"}, - {"encoding/asn1", "\x03k\x03\x8c\x01\x01'\r\x02\x01\x10\x03\x01"}, - {"encoding/base32", "\xf1\x01A\x02"}, - {"encoding/base64", "\x99\x01XA\x02"}, - {"encoding/binary", "n\x83\x01\f(\r\x05"}, - {"encoding/csv", "\x02\x01k\x03\x80\x01D\x12\x02"}, - {"encoding/gob", "\x02`\x05\a\x03e\x1b\v\x01\x03\x1d\b\x12\x01\x0f\x02"}, - {"encoding/hex", "n\x03\x80\x01A\x03"}, - {"encoding/json", "\x03\x01^\x04\b\x03\x80\x01\f(\r\x02\x01\x02\x10\x01\x01\x02"}, - {"encoding/pem", "\x03c\b\x83\x01A\x03"}, - {"encoding/xml", "\x02\x01_\f\x03\x80\x014\x05\n\x01\x02\x10\x02"}, - {"errors", "\xca\x01\x81\x01"}, - {"expvar", "kK?\b\v\x15\r\b\x02\x03\x01\x11"}, - {"flag", "b\f\x03\x80\x01,\b\x05\b\x02\x01\x10"}, - {"fmt", "nE>\f \b\r\x02\x03\x12"}, - {"go/ast", "\x03\x01m\x0e\x01q\x03)\b\r\x02\x01"}, - {"go/build", "\x02\x01k\x03\x01\x02\x02\a\x02\x01\x17\x1f\x04\x02\t\x19\x13\x01+\x01\x04\x01\a\b\x02\x01\x12\x02\x02"}, - {"go/build/constraint", "n\xc6\x01\x01\x12\x02"}, - {"go/constant", "q\x0f}\x01\x024\x01\x02\x12"}, - {"go/doc", "\x04m\x01\x05\t>31\x10\x02\x01\x12\x02"}, - {"go/doc/comment", "\x03n\xc1\x01\x01\x01\x01\x12\x02"}, - {"go/format", "\x03n\x01\v\x01\x02qD"}, - {"go/importer", "s\a\x01\x01\x04\x01p9"}, - {"go/internal/gccgoimporter", "\x02\x01X\x13\x03\x04\v\x01n\x02,\x01\x05\x11\x01\f\b"}, - {"go/internal/gcimporter", "\x02o\x0f\x010\x05\x0e-,\x15\x03\x02"}, - {"go/internal/srcimporter", "q\x01\x01\n\x03\x01p,\x01\x05\x12\x02\x14"}, - {"go/parser", "\x03k\x03\x01\x02\v\x01q\x01+\x06\x12"}, - {"go/printer", "q\x01\x02\x03\tq\f \x15\x02\x01\x02\v\x05\x02"}, - {"go/scanner", "\x03n\x0fq2\x10\x01\x13\x02"}, - {"go/token", "\x04m\x83\x01>\x02\x03\x01\x0f\x02"}, - {"go/types", "\x03\x01\x06d\x03\x01\x03\b\x03\x02\x15\x1f\x061\x04\x03\t \x06\a\b\x01\x01\x01\x02\x01\x0f\x02\x02"}, - {"go/version", "\xbb\x01z"}, - {"hash", "\xf1\x01"}, - 
{"hash/adler32", "n\x15\x16"}, - {"hash/crc32", "n\x15\x16\x15\x89\x01\x01\x13"}, - {"hash/crc64", "n\x15\x16\x9e\x01"}, - {"hash/fnv", "n\x15\x16g"}, - {"hash/maphash", "\x83\x01\x11!\x03\x93\x01"}, - {"html", "\xb5\x02\x02\x12"}, - {"html/template", "\x03h\x06\x18-;\x01\n!\x05\x01\x02\x03\f\x01\x02\f\x01\x03\x02"}, - {"image", "\x02l\x1ee\x0f4\x03\x01"}, + {"encoding/ascii85", "\xf5\x01C"}, + {"encoding/asn1", "\x03n\x03e(\x01'\r\x02\x01\x10\x03\x01"}, + {"encoding/base32", "\xf5\x01A\x02"}, + {"encoding/base64", "\x9c\x01YA\x02"}, + {"encoding/binary", "q\x84\x01\f(\r\x05"}, + {"encoding/csv", "\x02\x01n\x03\x81\x01D\x12\x02"}, + {"encoding/gob", "\x02c\x05\a\x03e\x1c\v\x01\x03\x1d\b\x12\x01\x0f\x02"}, + {"encoding/hex", "q\x03\x81\x01A\x03"}, + {"encoding/json", "\x03\x01a\x04\b\x03\x81\x01\f(\r\x02\x01\x02\x10\x01\x01\x02"}, + {"encoding/pem", "\x03f\b\x84\x01A\x03"}, + {"encoding/xml", "\x02\x01b\f\x03\x81\x014\x05\n\x01\x02\x10\x02"}, + {"errors", "\xcc\x01\x83\x01"}, + {"expvar", "nK@\b\v\x15\r\b\x02\x03\x01\x11"}, + {"flag", "e\f\x03\x81\x01,\b\x05\b\x02\x01\x10"}, + {"fmt", "qE&\x19\f \b\r\x02\x03\x12"}, + {"go/ast", "\x03\x01p\x0e\x01r\x03)\b\r\x02\x01\x12\x02"}, + {"go/build", "\x02\x01n\x03\x01\x02\x02\a\x02\x01\x17\x1f\x04\x02\b\x1b\x13\x01+\x01\x04\x01\a\b\x02\x01\x12\x02\x02"}, + {"go/build/constraint", "q\xc7\x01\x01\x12\x02"}, + {"go/constant", "t\x0f~\x01\x024\x01\x02\x12"}, + {"go/doc", "\x04p\x01\x05\t=51\x10\x02\x01\x12\x02"}, + {"go/doc/comment", "\x03q\xc2\x01\x01\x01\x01\x12\x02"}, + {"go/format", "\x03q\x01\v\x01\x02rD"}, + {"go/importer", "v\a\x01\x01\x04\x01q9"}, + {"go/internal/gccgoimporter", "\x02\x01[\x13\x03\x04\v\x01o\x02,\x01\x05\x11\x01\f\b"}, + {"go/internal/gcimporter", "\x02r\x0f\x010\x05\r/,\x15\x03\x02"}, + {"go/internal/srcimporter", "t\x01\x01\n\x03\x01q,\x01\x05\x12\x02\x14"}, + {"go/parser", "\x03n\x03\x01\x02\v\x01r\x01+\x06\x12"}, + {"go/printer", "t\x01\x02\x03\tr\f \x15\x02\x01\x02\v\x05\x02"}, + {"go/scanner", "\x03q\x0fr2\x10\x01\x13\x02"}, + {"go/token", "\x04p\x84\x01>\x02\x03\x01\x0f\x02"}, + {"go/types", "\x03\x01\x06g\x03\x01\x03\b\x03\x024\x062\x04\x03\t \x06\a\b\x01\x01\x01\x02\x01\x0f\x02\x02"}, + {"go/version", "\xbe\x01{"}, + {"hash", "\xf5\x01"}, + {"hash/adler32", "q\x15\x16"}, + {"hash/crc32", "q\x15\x16\x15\x8a\x01\x01\x13"}, + {"hash/crc64", "q\x15\x16\x9f\x01"}, + {"hash/fnv", "q\x15\x16h"}, + {"hash/maphash", "\x86\x01\x11<|"}, + {"html", "\xb9\x02\x02\x12"}, + {"html/template", "\x03k\x06\x18-<\x01\n!\x05\x01\x02\x03\f\x01\x02\f\x01\x03\x02"}, + {"image", "\x02o\x1ef\x0f4\x03\x01"}, {"image/color", ""}, - {"image/color/palette", "\x8c\x01"}, - {"image/draw", "\x8b\x01\x01\x04"}, - {"image/gif", "\x02\x01\x05f\x03\x1a\x01\x01\x01\vX"}, - {"image/internal/imageutil", "\x8b\x01"}, - {"image/jpeg", "\x02l\x1d\x01\x04a"}, - {"image/png", "\x02\a^\n\x12\x02\x06\x01eC"}, - {"index/suffixarray", "\x03d\a\x83\x01\f+\n\x01"}, - {"internal/abi", "\xb5\x01\x96\x01"}, - {"internal/asan", "\xcb\x02"}, - {"internal/bisect", "\xaa\x02\r\x01"}, - {"internal/buildcfg", "qGe\x06\x02\x05\n\x01"}, - {"internal/bytealg", "\xae\x01\x9d\x01"}, + {"image/color/palette", "\x8f\x01"}, + {"image/draw", "\x8e\x01\x01\x04"}, + {"image/gif", "\x02\x01\x05i\x03\x1a\x01\x01\x01\vY"}, + {"image/internal/imageutil", "\x8e\x01"}, + {"image/jpeg", "\x02o\x1d\x01\x04b"}, + {"image/png", "\x02\aa\n\x12\x02\x06\x01fC"}, + {"index/suffixarray", "\x03g\a\x84\x01\f+\n\x01"}, + {"internal/abi", "\xb8\x01\x97\x01"}, + {"internal/asan", "\xcf\x02"}, + 
{"internal/bisect", "\xae\x02\r\x01"}, + {"internal/buildcfg", "tGf\x06\x02\x05\n\x01"}, + {"internal/bytealg", "\xb1\x01\x9e\x01"}, {"internal/byteorder", ""}, {"internal/cfg", ""}, - {"internal/cgrouptest", "q[Q\x06\x0f\x02\x01\x04\x01"}, - {"internal/chacha8rand", "\x99\x01\x15\a\x96\x01"}, + {"internal/cgrouptest", "tZS\x06\x0f\x02\x01\x04\x01"}, + {"internal/chacha8rand", "\x9c\x01\x15\a\x97\x01"}, {"internal/copyright", ""}, {"internal/coverage", ""}, {"internal/coverage/calloc", ""}, - {"internal/coverage/cfile", "k\x06\x16\x17\x01\x02\x01\x01\x01\x01\x01\x01\x01#\x02$,\x06\a\n\x01\x03\r\x06"}, - {"internal/coverage/cformat", "\x04m-\x04O\v6\x01\x02\r"}, - {"internal/coverage/cmerge", "q-_"}, - {"internal/coverage/decodecounter", "g\n-\v\x02F,\x17\x17"}, - {"internal/coverage/decodemeta", "\x02e\n\x16\x17\v\x02F,"}, - {"internal/coverage/encodecounter", "\x02e\n-\f\x01\x02D\v!\x15"}, - {"internal/coverage/encodemeta", "\x02\x01d\n\x12\x04\x17\r\x02D,."}, - {"internal/coverage/pods", "\x04m-\x7f\x06\x05\n\x02\x01"}, - {"internal/coverage/rtcov", "\xcb\x02"}, - {"internal/coverage/slicereader", "g\n\x80\x01Z"}, - {"internal/coverage/slicewriter", "q\x80\x01"}, - {"internal/coverage/stringtab", "q8\x04D"}, + {"internal/coverage/cfile", "n\x06\x16\x17\x01\x02\x01\x01\x01\x01\x01\x01\x01\"\x02&,\x06\a\n\x01\x03\r\x06"}, + {"internal/coverage/cformat", "\x04p-\x04P\v6\x01\x02\r"}, + {"internal/coverage/cmerge", "t-`"}, + {"internal/coverage/decodecounter", "j\n-\v\x02G,\x17\x17"}, + {"internal/coverage/decodemeta", "\x02h\n\x16\x17\v\x02G,"}, + {"internal/coverage/encodecounter", "\x02h\n-\f\x01\x02E\v!\x15"}, + {"internal/coverage/encodemeta", "\x02\x01g\n\x12\x04\x17\r\x02E,."}, + {"internal/coverage/pods", "\x04p-\x80\x01\x06\x05\n\x02\x01"}, + {"internal/coverage/rtcov", "\xcf\x02"}, + {"internal/coverage/slicereader", "j\n\x81\x01Z"}, + {"internal/coverage/slicewriter", "t\x81\x01"}, + {"internal/coverage/stringtab", "t8\x04E"}, {"internal/coverage/test", ""}, {"internal/coverage/uleb128", ""}, - {"internal/cpu", "\xcb\x02"}, - {"internal/dag", "\x04m\xc1\x01\x03"}, - {"internal/diff", "\x03n\xc2\x01\x02"}, - {"internal/exportdata", "\x02\x01k\x03\x02c\x1b,\x01\x05\x11\x01\x02"}, - {"internal/filepathlite", "n*@\x1a@"}, - {"internal/fmtsort", "\x04\xa1\x02\r"}, - {"internal/fuzz", "\x03\nB\x18\x04\x03\x03\x01\v\x036;\f\x03\x1d\x01\x05\x02\x05\n\x01\x02\x01\x01\f\x04\x02"}, + {"internal/cpu", "\xcf\x02"}, + {"internal/dag", "\x04p\xc2\x01\x03"}, + {"internal/diff", "\x03q\xc3\x01\x02"}, + {"internal/exportdata", "\x02\x01n\x03\x02c\x1c,\x01\x05\x11\x01\x02"}, + {"internal/filepathlite", "q*A\x1a@"}, + {"internal/fmtsort", "\x04\xa5\x02\r"}, + {"internal/fuzz", "\x03\nE\x18\x04\x03\x03\x01\v\x036<\f\x03\x1d\x01\x05\x02\x05\n\x01\x02\x01\x01\f\x04\x02"}, {"internal/goarch", ""}, - {"internal/godebug", "\x96\x01!\x80\x01\x01\x13"}, + {"internal/godebug", "\x99\x01!\x81\x01\x01\x13"}, {"internal/godebugs", ""}, {"internal/goexperiment", ""}, {"internal/goos", ""}, - {"internal/goroot", "\x9d\x02\x01\x05\x12\x02"}, + {"internal/goroot", "\xa1\x02\x01\x05\x12\x02"}, {"internal/gover", "\x04"}, {"internal/goversion", ""}, - {"internal/itoa", ""}, - {"internal/lazyregexp", "\x9d\x02\v\r\x02"}, - {"internal/lazytemplate", "\xf1\x01,\x18\x02\f"}, - {"internal/msan", "\xcb\x02"}, + {"internal/lazyregexp", "\xa1\x02\v\r\x02"}, + {"internal/lazytemplate", "\xf5\x01,\x18\x02\f"}, + {"internal/msan", "\xcf\x02"}, {"internal/nettrace", ""}, - {"internal/obscuretestdata", "f\x8b\x01,"}, - 
{"internal/oserror", "n"}, - {"internal/pkgbits", "\x03L\x18\a\x03\x04\vq\r\x1f\r\n\x01"}, + {"internal/obscuretestdata", "i\x8c\x01,"}, + {"internal/oserror", "q"}, + {"internal/pkgbits", "\x03O\x18\a\x03\x04\vr\r\x1f\r\n\x01"}, {"internal/platform", ""}, - {"internal/poll", "nO\x1f\x159\r\x01\x01\f\x06"}, - {"internal/profile", "\x03\x04g\x03\x80\x017\v\x01\x01\x10"}, + {"internal/poll", "qj\x05\x159\r\x01\x01\f\x06"}, + {"internal/profile", "\x03\x04j\x03\x81\x017\n\x01\x01\x01\x10"}, {"internal/profilerecord", ""}, - {"internal/race", "\x94\x01\xb7\x01"}, - {"internal/reflectlite", "\x94\x01!9\b\x13\x01\a\x03E;\x01\x03\a\x01\x03\x02\x02\x01\x02\x06\x02\x01\x01\n\x01\x01\x05\x01\x02\x05\b\x01\x01\x01\x02\x01\r\x02\x02\x02\b\x01\x01\x01"}, - {"net/http/cgi", "\x02Q\x1b\x03\x80\x01\x04\a\v\x01\x13\x01\x01\x01\x04\x01\x05\x02\b\x02\x01\x10\x0e"}, - {"net/http/cookiejar", "\x04j\x03\x96\x01\x01\b\f\x16\x03\x02\x0e\x04"}, - {"net/http/fcgi", "\x02\x01\nZ\a\x03\x80\x01\x16\x01\x01\x14\x18\x02\x0e"}, - {"net/http/httptest", "\x02\x01\nF\x02\x1b\x01\x80\x01\x04\x12\x01\n\t\x02\x17\x01\x02\x0e\x0e"}, - {"net/http/httptrace", "\rFnF\x14\n "}, - {"net/http/httputil", "\x02\x01\na\x03\x80\x01\x04\x0f\x03\x01\x05\x02\x01\v\x01\x19\x02\x0e\x0e"}, - {"net/http/internal", "\x02\x01k\x03\x80\x01"}, - {"net/http/internal/ascii", "\xb5\x02\x12"}, - {"net/http/internal/httpcommon", "\ra\x03\x9c\x01\x0e\x01\x17\x01\x01\x02\x1c\x02"}, - {"net/http/internal/testcert", "\xb5\x02"}, - {"net/http/pprof", "\x02\x01\nd\x18-\x11*\x04\x13\x14\x01\r\x04\x03\x01\x02\x01\x10"}, + {"log/slog/internal/benchmarks", "\rd\x03\x81\x01\x06\x03:\x11"}, + {"log/slog/internal/buffer", "\xbb\x02"}, + {"log/syslog", "q\x03\x85\x01\x12\x16\x18\x02\x0e"}, + {"maps", "\xf8\x01W"}, + {"math", "\xb1\x01SK"}, + {"math/big", "\x03n\x03(\x15D\f\x03\x020\x02\x01\x02\x14"}, + {"math/big/internal/asmgen", "\x03\x01p\x90\x012\x03"}, + {"math/bits", "\xcf\x02"}, + {"math/cmplx", "\x81\x02\x03"}, + {"math/rand", "\xb9\x01H:\x01\x13"}, + {"math/rand/v2", "q+\x03b\x03K"}, + {"mime", "\x02\x01f\b\x03\x81\x01\v!\x15\x03\x02\x10\x02"}, + {"mime/multipart", "\x02\x01K#\x03E<\v\x01\a\x02\x15\x02\x06\x0f\x02\x01\x16"}, + {"mime/quotedprintable", "\x02\x01q\x81\x01"}, + {"net", "\x04\td*\x1e\n\x05\x12\x01\x01\x04\x15\x01%\x06\r\b\x05\x01\x01\f\x06\a"}, + {"net/http", "\x02\x01\x03\x01\x04\x02A\b\x13\x01\a\x03E<\x01\x03\a\x01\x03\x02\x02\x01\x02\x06\x02\x01\x01\n\x01\x01\x05\x01\x02\x05\b\x01\x01\x01\x02\x01\r\x02\x02\x02\b\x01\x01\x01"}, + {"net/http/cgi", "\x02T\x1b\x03\x81\x01\x04\a\v\x01\x13\x01\x01\x01\x04\x01\x05\x02\b\x02\x01\x10\x0e"}, + {"net/http/cookiejar", "\x04m\x03\x97\x01\x01\b\f\x16\x03\x02\x0e\x04"}, + {"net/http/fcgi", "\x02\x01\n]\a\x03\x81\x01\x16\x01\x01\x14\x18\x02\x0e"}, + {"net/http/httptest", "\x02\x01\nI\x02\x1b\x01\x81\x01\x04\x12\x01\n\t\x02\x17\x01\x02\x0e\x0e"}, + {"net/http/httptrace", "\rImH\x14\n "}, + {"net/http/httputil", "\x02\x01\nd\x03\x81\x01\x04\x0f\x03\x01\x05\x02\x01\v\x01\x19\x02\x0e\x0e"}, + {"net/http/internal", "\x02\x01n\x03\x81\x01"}, + {"net/http/internal/ascii", "\xb9\x02\x12"}, + {"net/http/internal/httpcommon", "\rd\x03\x9d\x01\x0e\x01\x17\x01\x01\x02\x1c\x02"}, + {"net/http/internal/testcert", "\xb9\x02"}, + {"net/http/pprof", "\x02\x01\ng\x18-\x02\x0e,\x04\x13\x14\x01\r\x04\x03\x01\x02\x01\x10"}, {"net/internal/cgotest", ""}, - {"net/internal/socktest", "q\xc6\x01\x02"}, - {"net/mail", "\x02l\x03\x80\x01\x04\x0f\x03\x14\x1a\x02\x0e\x04"}, - {"net/netip", "\x04j*\x01$@\x034\x16"}, - {"net/rpc", 
"\x02g\x05\x03\x0f\ng\x04\x12\x01\x1d\r\x03\x02"}, - {"net/rpc/jsonrpc", "k\x03\x03\x80\x01\x16\x11\x1f"}, - {"net/smtp", "\x19/\v\x13\b\x03\x80\x01\x16\x14\x1a"}, - {"net/textproto", "\x02\x01k\x03\x80\x01\f\n-\x01\x02\x14"}, - {"net/url", "n\x03\x8b\x01&\x10\x02\x01\x16"}, - {"os", "n*\x01\x19\x03\b\t\x12\x03\x01\x05\x10\x018\b\x05\x01\x01\f\x06"}, - {"os/exec", "\x03\naH%\x01\x15\x01+\x06\a\n\x01\x04\f"}, - {"os/exec/internal/fdtest", "\xb9\x02"}, - {"os/signal", "\r\x90\x02\x15\x05\x02"}, - {"os/user", "\x02\x01k\x03\x80\x01,\r\n\x01\x02"}, - {"path", "n*\xb1\x01"}, - {"path/filepath", "n*\x1a@+\r\b\x03\x04\x10"}, - {"plugin", "n"}, - {"reflect", "n&\x04\x1d\b\f\x06\x04\x1b\x06\t-\n\x03\x10\x02\x02"}, + {"net/internal/socktest", "t\xc7\x01\x02"}, + {"net/mail", "\x02o\x03\x81\x01\x04\x0f\x03\x14\x1a\x02\x0e\x04"}, + {"net/netip", "\x04m*\x01e\x034\x16"}, + {"net/rpc", "\x02j\x05\x03\x0f\nh\x04\x12\x01\x1d\r\x03\x02"}, + {"net/rpc/jsonrpc", "n\x03\x03\x81\x01\x16\x11\x1f"}, + {"net/smtp", "\x192\v\x13\b\x03\x81\x01\x16\x14\x1a"}, + {"net/textproto", "\x02\x01n\x03\x81\x01\f\n-\x01\x02\x14"}, + {"net/url", "q\x03\xa7\x01\v\x10\x02\x01\x16"}, + {"os", "q*\x01\x19\x03\x10\x13\x01\x03\x01\x05\x10\x018\b\x05\x01\x01\f\x06"}, + {"os/exec", "\x03\ndH&\x01\x15\x01+\x06\a\n\x01\x04\f"}, + {"os/exec/internal/fdtest", "\xbd\x02"}, + {"os/signal", "\r\x94\x02\x15\x05\x02"}, + {"os/user", "\x02\x01n\x03\x81\x01,\r\n\x01\x02"}, + {"path", "q*\xb2\x01"}, + {"path/filepath", "q*\x1aA+\r\b\x03\x04\x10"}, + {"plugin", "q"}, + {"reflect", "q&\x04\x1d\x13\b\x03\x05\x17\x06\t-\n\x03\x10\x02\x02"}, {"reflect/internal/example1", ""}, {"reflect/internal/example2", ""}, - {"regexp", "\x03\xee\x018\t\x02\x01\x02\x10\x02"}, - {"regexp/syntax", "\xb2\x02\x01\x01\x01\x02\x10\x02"}, - {"runtime", "\x94\x01\x04\x01\x03\f\x06\a\x02\x01\x01\x0f\x03\x01\x01\x01\x01\x01\x02\x01\x01\x04\x10c"}, - {"runtime/coverage", "\xa0\x01Q"}, - {"runtime/debug", "qUW\r\b\x02\x01\x10\x06"}, - {"runtime/metrics", "\xb7\x01F-!"}, - {"runtime/pprof", "\x02\x01\x01\x03\x06Z\a\x03#4)\f \r\b\x01\x01\x01\x02\x02\t\x03\x06"}, - {"runtime/race", "\xb0\x02"}, + {"regexp", "\x03\xf2\x018\t\x02\x01\x02\x10\x02"}, + {"regexp/syntax", "\xb6\x02\x01\x01\x01\x02\x10\x02"}, + {"runtime", "\x97\x01\x04\x01\x03\f\x06\a\x02\x01\x01\x0e\x03\x01\x01\x01\x02\x01\x01\x02\x01\x04\x01\x10c"}, + {"runtime/coverage", "\xa3\x01R"}, + {"runtime/debug", "tTY\r\b\x02\x01\x10\x06"}, + {"runtime/metrics", "\xba\x01G-!"}, + {"runtime/pprof", "\x02\x01\x01\x03\x06]\a\x03#$\x0f+\f \r\b\x01\x01\x01\x02\x02\t\x03\x06"}, + {"runtime/race", "\xb4\x02"}, {"runtime/race/internal/amd64v1", ""}, - {"runtime/trace", "\ra\x03w\t9\b\x05\x01\r\x06"}, - {"slices", "\x04\xf0\x01\fK"}, - {"sort", "\xca\x0162"}, - {"strconv", "n*@%\x03I"}, - {"strings", "n&\x04@\x19\x03\f7\x10\x02\x02"}, + {"runtime/trace", "\rd\x03x\t9\b\x05\x01\r\x06"}, + {"slices", "\x04\xf4\x01\fK"}, + {"sort", "\xcc\x0182"}, + {"strconv", "q*@\x01q"}, + {"strings", "q&\x04A\x19\x03\f7\x10\x02\x02"}, {"structs", ""}, - {"sync", "\xc9\x01\x10\x01P\x0e\x13"}, - {"sync/atomic", "\xcb\x02"}, - {"syscall", "n'\x03\x01\x1c\b\x03\x03\x06\vV\b\x05\x01\x13"}, - {"testing", "\x03\na\x02\x01X\x14\x14\f\x05\x1b\x06\x02\x05\x02\x05\x01\x02\x01\x02\x01\r\x02\x02\x02"}, - {"testing/fstest", "n\x03\x80\x01\x01\n&\x10\x03\b\b"}, - {"testing/internal/testdeps", "\x02\v\xa7\x01-\x10,\x03\x05\x03\x06\a\x02\x0e"}, - {"testing/iotest", "\x03k\x03\x80\x01\x04"}, - {"testing/quick", "p\x01\x8c\x01\x05#\x10\x10"}, - {"testing/slogtest", 
"\ra\x03\x86\x01.\x05\x10\v"}, - {"testing/synctest", "\xda\x01`\x11"}, - {"text/scanner", "\x03n\x80\x01,*\x02"}, - {"text/tabwriter", "q\x80\x01X"}, - {"text/template", "n\x03B>\x01\n \x01\x05\x01\x02\x05\v\x02\r\x03\x02"}, - {"text/template/parse", "\x03n\xb9\x01\n\x01\x12\x02"}, - {"time", "n*\x1e\"(*\r\x02\x12"}, - {"time/tzdata", "n\xcb\x01\x12"}, + {"sync", "\xcb\x01\x12\x01P\x0e\x13"}, + {"sync/atomic", "\xcf\x02"}, + {"syscall", "q'\x03\x01\x1c\n\x03\x06\f\x04S\b\x05\x01\x13"}, + {"testing", "\x03\nd\x02\x01W\x16\x14\f\x05\x1b\x06\x02\x05\x02\x05\x01\x02\x01\x02\x01\r\x02\x04"}, + {"testing/fstest", "q\x03\x81\x01\x01\n&\x10\x03\b\b"}, + {"testing/internal/testdeps", "\x02\v\xaa\x01.\x10,\x03\x05\x03\x06\a\x02\x0e"}, + {"testing/iotest", "\x03n\x03\x81\x01\x04"}, + {"testing/quick", "s\x01\x8d\x01\x05#\x10\x10"}, + {"testing/slogtest", "\rd\x03\x87\x01.\x05\x10\v"}, + {"testing/synctest", "\xde\x01`\x11"}, + {"text/scanner", "\x03q\x81\x01,*\x02"}, + {"text/tabwriter", "t\x81\x01X"}, + {"text/template", "q\x03B?\x01\n \x01\x05\x01\x02\x05\v\x02\r\x03\x02"}, + {"text/template/parse", "\x03q\xba\x01\n\x01\x12\x02"}, + {"time", "q*\x1e#(*\r\x02\x12"}, + {"time/tzdata", "q\xcc\x01\x12"}, {"unicode", ""}, {"unicode/utf16", ""}, {"unicode/utf8", ""}, - {"unique", "\x94\x01!#\x01Q\r\x01\x13\x12"}, + {"unique", "\x97\x01!$\x01Q\r\x01\x13\x12"}, {"unsafe", ""}, - {"vendor/golang.org/x/crypto/chacha20", "\x10W\a\x92\x01*&"}, - {"vendor/golang.org/x/crypto/chacha20poly1305", "\x10W\a\xde\x01\x04\x01\a"}, - {"vendor/golang.org/x/crypto/cryptobyte", "d\n\x03\x8d\x01' \n"}, + {"vendor/golang.org/x/crypto/chacha20", "\x10Z\a\x93\x01*&"}, + {"vendor/golang.org/x/crypto/chacha20poly1305", "\x10Z\a\xdf\x01\x04\x01\a"}, + {"vendor/golang.org/x/crypto/cryptobyte", "g\n\x03\x8e\x01' \n"}, {"vendor/golang.org/x/crypto/cryptobyte/asn1", ""}, - {"vendor/golang.org/x/crypto/internal/alias", "\xcb\x02"}, - {"vendor/golang.org/x/crypto/internal/poly1305", "R\x15\x99\x01"}, - {"vendor/golang.org/x/net/dns/dnsmessage", "n"}, - {"vendor/golang.org/x/net/http/httpguts", "\x87\x02\x14\x1a\x14\r"}, - {"vendor/golang.org/x/net/http/httpproxy", "n\x03\x96\x01\x10\x05\x01\x18\x14\r"}, - {"vendor/golang.org/x/net/http2/hpack", "\x03k\x03\x80\x01F"}, - {"vendor/golang.org/x/net/idna", "q\x8c\x018\x14\x10\x02\x01"}, - {"vendor/golang.org/x/net/nettest", "\x03d\a\x03\x80\x01\x11\x05\x16\x01\f\n\x01\x02\x02\x01\v"}, - {"vendor/golang.org/x/sys/cpu", "\x9d\x02\r\n\x01\x16"}, - {"vendor/golang.org/x/text/secure/bidirule", "n\xdb\x01\x11\x01"}, - {"vendor/golang.org/x/text/transform", "\x03k\x83\x01X"}, - {"vendor/golang.org/x/text/unicode/bidi", "\x03\bf\x84\x01>\x16"}, - {"vendor/golang.org/x/text/unicode/norm", "g\n\x80\x01F\x12\x11"}, - {"weak", "\x94\x01\x96\x01!"}, + {"vendor/golang.org/x/crypto/internal/alias", "\xcf\x02"}, + {"vendor/golang.org/x/crypto/internal/poly1305", "U\x15\x9a\x01"}, + {"vendor/golang.org/x/net/dns/dnsmessage", "q"}, + {"vendor/golang.org/x/net/http/httpguts", "\x8b\x02\x14\x1a\x14\r"}, + {"vendor/golang.org/x/net/http/httpproxy", "q\x03\x97\x01\x10\x05\x01\x18\x14\r"}, + {"vendor/golang.org/x/net/http2/hpack", "\x03n\x03\x81\x01F"}, + {"vendor/golang.org/x/net/idna", "t\x8d\x018\x14\x10\x02\x01"}, + {"vendor/golang.org/x/net/nettest", "\x03g\a\x03\x81\x01\x11\x05\x16\x01\f\n\x01\x02\x02\x01\v"}, + {"vendor/golang.org/x/sys/cpu", "\xa1\x02\r\n\x01\x16"}, + {"vendor/golang.org/x/text/secure/bidirule", "q\xdc\x01\x11\x01"}, + {"vendor/golang.org/x/text/transform", "\x03n\x84\x01X"}, + 
{"vendor/golang.org/x/text/unicode/bidi", "\x03\bi\x85\x01>\x16"}, + {"vendor/golang.org/x/text/unicode/norm", "j\n\x81\x01F\x12\x11"}, + {"weak", "\x97\x01\x97\x01!"}, } + +// bootstrap is the list of bootstrap packages extracted from cmd/dist. +var bootstrap = map[string]bool{ + "cmp": true, + "cmd/asm": true, + "cmd/asm/internal/arch": true, + "cmd/asm/internal/asm": true, + "cmd/asm/internal/flags": true, + "cmd/asm/internal/lex": true, + "cmd/cgo": true, + "cmd/compile": true, + "cmd/compile/internal/abi": true, + "cmd/compile/internal/abt": true, + "cmd/compile/internal/amd64": true, + "cmd/compile/internal/arm": true, + "cmd/compile/internal/arm64": true, + "cmd/compile/internal/base": true, + "cmd/compile/internal/bitvec": true, + "cmd/compile/internal/compare": true, + "cmd/compile/internal/coverage": true, + "cmd/compile/internal/deadlocals": true, + "cmd/compile/internal/devirtualize": true, + "cmd/compile/internal/dwarfgen": true, + "cmd/compile/internal/escape": true, + "cmd/compile/internal/gc": true, + "cmd/compile/internal/importer": true, + "cmd/compile/internal/inline": true, + "cmd/compile/internal/inline/inlheur": true, + "cmd/compile/internal/inline/interleaved": true, + "cmd/compile/internal/ir": true, + "cmd/compile/internal/liveness": true, + "cmd/compile/internal/logopt": true, + "cmd/compile/internal/loong64": true, + "cmd/compile/internal/loopvar": true, + "cmd/compile/internal/mips": true, + "cmd/compile/internal/mips64": true, + "cmd/compile/internal/noder": true, + "cmd/compile/internal/objw": true, + "cmd/compile/internal/pgoir": true, + "cmd/compile/internal/pkginit": true, + "cmd/compile/internal/ppc64": true, + "cmd/compile/internal/rangefunc": true, + "cmd/compile/internal/reflectdata": true, + "cmd/compile/internal/riscv64": true, + "cmd/compile/internal/rttype": true, + "cmd/compile/internal/s390x": true, + "cmd/compile/internal/ssa": true, + "cmd/compile/internal/ssagen": true, + "cmd/compile/internal/staticdata": true, + "cmd/compile/internal/staticinit": true, + "cmd/compile/internal/syntax": true, + "cmd/compile/internal/test": true, + "cmd/compile/internal/typebits": true, + "cmd/compile/internal/typecheck": true, + "cmd/compile/internal/types": true, + "cmd/compile/internal/types2": true, + "cmd/compile/internal/walk": true, + "cmd/compile/internal/wasm": true, + "cmd/compile/internal/x86": true, + "cmd/internal/archive": true, + "cmd/internal/bio": true, + "cmd/internal/codesign": true, + "cmd/internal/dwarf": true, + "cmd/internal/edit": true, + "cmd/internal/gcprog": true, + "cmd/internal/goobj": true, + "cmd/internal/hash": true, + "cmd/internal/macho": true, + "cmd/internal/obj": true, + "cmd/internal/obj/arm": true, + "cmd/internal/obj/arm64": true, + "cmd/internal/obj/loong64": true, + "cmd/internal/obj/mips": true, + "cmd/internal/obj/ppc64": true, + "cmd/internal/obj/riscv": true, + "cmd/internal/obj/s390x": true, + "cmd/internal/obj/wasm": true, + "cmd/internal/obj/x86": true, + "cmd/internal/objabi": true, + "cmd/internal/par": true, + "cmd/internal/pgo": true, + "cmd/internal/pkgpath": true, + "cmd/internal/quoted": true, + "cmd/internal/src": true, + "cmd/internal/sys": true, + "cmd/internal/telemetry": true, + "cmd/internal/telemetry/counter": true, + "cmd/link": true, + "cmd/link/internal/amd64": true, + "cmd/link/internal/arm": true, + "cmd/link/internal/arm64": true, + "cmd/link/internal/benchmark": true, + "cmd/link/internal/dwtest": true, + "cmd/link/internal/ld": true, + "cmd/link/internal/loadelf": true, + 
"cmd/link/internal/loader": true, + "cmd/link/internal/loadmacho": true, + "cmd/link/internal/loadpe": true, + "cmd/link/internal/loadxcoff": true, + "cmd/link/internal/loong64": true, + "cmd/link/internal/mips": true, + "cmd/link/internal/mips64": true, + "cmd/link/internal/ppc64": true, + "cmd/link/internal/riscv64": true, + "cmd/link/internal/s390x": true, + "cmd/link/internal/sym": true, + "cmd/link/internal/wasm": true, + "cmd/link/internal/x86": true, + "compress/flate": true, + "compress/zlib": true, + "container/heap": true, + "debug/dwarf": true, + "debug/elf": true, + "debug/macho": true, + "debug/pe": true, + "go/build/constraint": true, + "go/constant": true, + "go/version": true, + "internal/abi": true, + "internal/coverage": true, + "cmd/internal/cov/covcmd": true, + "internal/bisect": true, + "internal/buildcfg": true, + "internal/exportdata": true, + "internal/goarch": true, + "internal/godebugs": true, + "internal/goexperiment": true, + "internal/goroot": true, + "internal/gover": true, + "internal/goversion": true, + "internal/lazyregexp": true, + "internal/pkgbits": true, + "internal/platform": true, + "internal/profile": true, + "internal/race": true, + "internal/runtime/gc": true, + "internal/saferio": true, + "internal/syscall/unix": true, + "internal/types/errors": true, + "internal/unsafeheader": true, + "internal/xcoff": true, + "internal/zstd": true, + "math/bits": true, + "sort": true, +} + +// BootstrapVersion is the minor version of Go used during toolchain +// bootstrapping. Packages for which [IsBootstrapPackage] must not use +// features of Go newer than this version. +const BootstrapVersion = Version(24) // go1.24.6 diff --git a/vendor/golang.org/x/tools/internal/stdlib/import.go b/vendor/golang.org/x/tools/internal/stdlib/import.go index f6909878a8a14..8ecc672b8b5f6 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/import.go +++ b/vendor/golang.org/x/tools/internal/stdlib/import.go @@ -87,3 +87,11 @@ func find(pkg string) (int, bool) { return strings.Compare(p.name, n) }) } + +// IsBootstrapPackage reports whether pkg is one of the low-level +// packages in the Go distribution that must compile with the older +// language version specified by [BootstrapVersion] during toolchain +// bootstrapping; see golang.org/s/go15bootstrap. 
+func IsBootstrapPackage(pkg string) bool { + return bootstrap[pkg] +} diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go index c1faa50d367c1..362f23c436c54 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -225,6 +225,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Buffer).Grow", Method, 1, ""}, {"(*Buffer).Len", Method, 0, ""}, {"(*Buffer).Next", Method, 0, ""}, + {"(*Buffer).Peek", Method, 26, ""}, {"(*Buffer).Read", Method, 0, ""}, {"(*Buffer).ReadByte", Method, 0, ""}, {"(*Buffer).ReadBytes", Method, 0, ""}, @@ -1628,6 +1629,7 @@ var PackageSymbols = map[string][]Symbol{ {"ResultNoRows", Var, 0, ""}, {"Rows", Type, 0, ""}, {"RowsAffected", Type, 0, ""}, + {"RowsColumnScanner", Type, 26, ""}, {"RowsColumnTypeDatabaseTypeName", Type, 8, ""}, {"RowsColumnTypeLength", Type, 8, ""}, {"RowsColumnTypeNullable", Type, 8, ""}, @@ -4953,6 +4955,7 @@ var PackageSymbols = map[string][]Symbol{ }, "errors": { {"As", Func, 13, "func(err error, target any) bool"}, + {"AsType", Func, 26, "func[E error](err error) (E, bool)"}, {"ErrUnsupported", Var, 21, ""}, {"Is", Func, 13, "func(err error, target error) bool"}, {"Join", Func, 20, "func(errs ...error) error"}, @@ -5090,7 +5093,7 @@ var PackageSymbols = map[string][]Symbol{ {"Append", Func, 19, "func(b []byte, a ...any) []byte"}, {"Appendf", Func, 19, "func(b []byte, format string, a ...any) []byte"}, {"Appendln", Func, 19, "func(b []byte, a ...any) []byte"}, - {"Errorf", Func, 0, "func(format string, a ...any) error"}, + {"Errorf", Func, 0, "func(format string, a ...any) (err error)"}, {"FormatString", Func, 20, "func(state State, verb rune) string"}, {"Formatter", Type, 0, ""}, {"Fprint", Func, 0, "func(w io.Writer, a ...any) (n int, err error)"}, @@ -5155,6 +5158,9 @@ var PackageSymbols = map[string][]Symbol{ {"(*DeclStmt).Pos", Method, 0, ""}, {"(*DeferStmt).End", Method, 0, ""}, {"(*DeferStmt).Pos", Method, 0, ""}, + {"(*Directive).End", Method, 26, ""}, + {"(*Directive).ParseArgs", Method, 26, ""}, + {"(*Directive).Pos", Method, 26, ""}, {"(*Ellipsis).End", Method, 0, ""}, {"(*Ellipsis).Pos", Method, 0, ""}, {"(*EmptyStmt).End", Method, 0, ""}, @@ -5320,6 +5326,15 @@ var PackageSymbols = map[string][]Symbol{ {"DeferStmt", Type, 0, ""}, {"DeferStmt.Call", Field, 0, ""}, {"DeferStmt.Defer", Field, 0, ""}, + {"Directive", Type, 26, ""}, + {"Directive.Args", Field, 26, ""}, + {"Directive.ArgsPos", Field, 26, ""}, + {"Directive.Name", Field, 26, ""}, + {"Directive.Slash", Field, 26, ""}, + {"Directive.Tool", Field, 26, ""}, + {"DirectiveArg", Type, 26, ""}, + {"DirectiveArg.Arg", Field, 26, ""}, + {"DirectiveArg.Pos", Field, 26, ""}, {"Ellipsis", Type, 0, ""}, {"Ellipsis.Ellipsis", Field, 0, ""}, {"Ellipsis.Elt", Field, 0, ""}, @@ -5469,6 +5484,7 @@ var PackageSymbols = map[string][]Symbol{ {"ParenExpr.Lparen", Field, 0, ""}, {"ParenExpr.Rparen", Field, 0, ""}, {"ParenExpr.X", Field, 0, ""}, + {"ParseDirective", Func, 26, "func(pos token.Pos, c string) (Directive, bool)"}, {"Pkg", Const, 0, ""}, {"Preorder", Func, 23, "func(root Node) iter.Seq[Node]"}, {"PreorderStack", Func, 25, "func(root Node, stack []Node, f func(n Node, stack []Node) bool)"}, @@ -7271,6 +7287,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*Logger).WarnContext", Method, 21, ""}, {"(*Logger).With", Method, 21, ""}, {"(*Logger).WithGroup", Method, 21, ""}, + {"(*MultiHandler).Enabled", Method, 26, ""}, + 
{"(*MultiHandler).Handle", Method, 26, ""}, + {"(*MultiHandler).WithAttrs", Method, 26, ""}, + {"(*MultiHandler).WithGroup", Method, 26, ""}, {"(*Record).Add", Method, 21, ""}, {"(*Record).AddAttrs", Method, 21, ""}, {"(*TextHandler).Enabled", Method, 21, ""}, @@ -7358,9 +7378,11 @@ var PackageSymbols = map[string][]Symbol{ {"LogValuer", Type, 21, ""}, {"Logger", Type, 21, ""}, {"MessageKey", Const, 21, ""}, + {"MultiHandler", Type, 26, ""}, {"New", Func, 21, "func(h Handler) *Logger"}, {"NewJSONHandler", Func, 21, "func(w io.Writer, opts *HandlerOptions) *JSONHandler"}, {"NewLogLogger", Func, 21, "func(h Handler, level Level) *log.Logger"}, + {"NewMultiHandler", Func, 26, "func(handlers ...Handler) *MultiHandler"}, {"NewRecord", Func, 21, "func(t time.Time, level Level, msg string, pc uintptr) Record"}, {"NewTextHandler", Func, 21, "func(w io.Writer, opts *HandlerOptions) *TextHandler"}, {"Record", Type, 21, ""}, @@ -7515,7 +7537,7 @@ var PackageSymbols = map[string][]Symbol{ {"MinInt64", Const, 0, ""}, {"MinInt8", Const, 0, ""}, {"Mod", Func, 0, "func(x float64, y float64) float64"}, - {"Modf", Func, 0, "func(f float64) (int float64, frac float64)"}, + {"Modf", Func, 0, "func(f float64) (integer float64, fractional float64)"}, {"NaN", Func, 0, "func() float64"}, {"Nextafter", Func, 0, "func(x float64, y float64) (r float64)"}, {"Nextafter32", Func, 4, "func(x float32, y float32) (r float32)"}, @@ -7972,6 +7994,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*DNSError).Unwrap", Method, 23, ""}, {"(*Dialer).Dial", Method, 1, ""}, {"(*Dialer).DialContext", Method, 7, ""}, + {"(*Dialer).DialIP", Method, 26, ""}, + {"(*Dialer).DialTCP", Method, 26, ""}, + {"(*Dialer).DialUDP", Method, 26, ""}, + {"(*Dialer).DialUnix", Method, 26, ""}, {"(*Dialer).MultipathTCP", Method, 21, ""}, {"(*Dialer).SetMultipathTCP", Method, 21, ""}, {"(*IP).UnmarshalText", Method, 2, ""}, @@ -8457,6 +8483,7 @@ var PackageSymbols = map[string][]Symbol{ {"HTTP2Config.PermitProhibitedCipherSuites", Field, 24, ""}, {"HTTP2Config.PingTimeout", Field, 24, ""}, {"HTTP2Config.SendPingTimeout", Field, 24, ""}, + {"HTTP2Config.StrictMaxConcurrentRequests", Field, 26, ""}, {"HTTP2Config.WriteByteTimeout", Field, 24, ""}, {"Handle", Func, 0, "func(pattern string, handler Handler)"}, {"HandleFunc", Func, 0, "func(pattern string, handler func(ResponseWriter, *Request))"}, @@ -8904,6 +8931,7 @@ var PackageSymbols = map[string][]Symbol{ {"(Prefix).AppendText", Method, 24, ""}, {"(Prefix).AppendTo", Method, 18, ""}, {"(Prefix).Bits", Method, 18, ""}, + {"(Prefix).Compare", Method, 26, ""}, {"(Prefix).Contains", Method, 18, ""}, {"(Prefix).IsSingleIP", Method, 18, ""}, {"(Prefix).IsValid", Method, 18, ""}, @@ -9177,6 +9205,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Process).Release", Method, 0, ""}, {"(*Process).Signal", Method, 0, ""}, {"(*Process).Wait", Method, 0, ""}, + {"(*Process).WithHandle", Method, 26, ""}, {"(*ProcessState).ExitCode", Method, 12, ""}, {"(*ProcessState).Exited", Method, 0, ""}, {"(*ProcessState).Pid", Method, 0, ""}, @@ -9234,6 +9263,7 @@ var PackageSymbols = map[string][]Symbol{ {"ErrExist", Var, 0, ""}, {"ErrInvalid", Var, 0, ""}, {"ErrNoDeadline", Var, 10, ""}, + {"ErrNoHandle", Var, 26, ""}, {"ErrNotExist", Var, 0, ""}, {"ErrPermission", Var, 0, ""}, {"ErrProcessDone", Var, 16, ""}, @@ -9461,7 +9491,7 @@ var PackageSymbols = map[string][]Symbol{ {"ListSeparator", Const, 0, ""}, {"Localize", Func, 23, "func(path string) (string, error)"}, {"Match", Func, 0, "func(pattern string, name string) 
(matched bool, err error)"}, - {"Rel", Func, 0, "func(basepath string, targpath string) (string, error)"}, + {"Rel", Func, 0, "func(basePath string, targPath string) (string, error)"}, {"Separator", Const, 0, ""}, {"SkipAll", Var, 20, ""}, {"SkipDir", Var, 0, ""}, @@ -9932,7 +9962,7 @@ var PackageSymbols = map[string][]Symbol{ {"PanicNilError", Type, 21, ""}, {"Pinner", Type, 21, ""}, {"ReadMemStats", Func, 0, "func(m *MemStats)"}, - {"ReadTrace", Func, 5, "func() []byte"}, + {"ReadTrace", Func, 5, "func() (buf []byte)"}, {"SetBlockProfileRate", Func, 1, "func(rate int)"}, {"SetCPUProfileRate", Func, 0, "func(hz int)"}, {"SetCgoTraceback", Func, 7, "func(version int, traceback unsafe.Pointer, context unsafe.Pointer, symbolizer unsafe.Pointer)"}, @@ -16679,6 +16709,7 @@ var PackageSymbols = map[string][]Symbol{ {"ValueOf", Func, 0, ""}, }, "testing": { + {"(*B).ArtifactDir", Method, 26, ""}, {"(*B).Attr", Method, 25, ""}, {"(*B).Chdir", Method, 24, ""}, {"(*B).Cleanup", Method, 14, ""}, @@ -16713,6 +16744,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*B).StopTimer", Method, 0, ""}, {"(*B).TempDir", Method, 15, ""}, {"(*F).Add", Method, 18, ""}, + {"(*F).ArtifactDir", Method, 26, ""}, {"(*F).Attr", Method, 25, ""}, {"(*F).Chdir", Method, 24, ""}, {"(*F).Cleanup", Method, 18, ""}, @@ -16738,6 +16770,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*F).TempDir", Method, 18, ""}, {"(*M).Run", Method, 4, ""}, {"(*PB).Next", Method, 3, ""}, + {"(*T).ArtifactDir", Method, 26, ""}, {"(*T).Attr", Method, 25, ""}, {"(*T).Chdir", Method, 24, ""}, {"(*T).Cleanup", Method, 14, ""}, diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go index f49802b8ef7fd..8d13f12147f5f 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/normalize.go +++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -160,8 +160,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in // The term set of an interface is the intersection of the term sets of its // embedded types. tset.terms = allTermlist - for i := 0; i < u.NumEmbeddeds(); i++ { - embedded := u.EmbeddedType(i) + for embedded := range u.EmbeddedTypes() { if _, ok := embedded.Underlying().(*types.TypeParam); ok { return nil, fmt.Errorf("invalid embedded type %T", embedded) } @@ -174,8 +173,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in case *types.Union: // The term set of a union is the union of term sets of its terms. tset.terms = nil - for i := 0; i < u.Len(); i++ { - t := u.Term(i) + for t := range u.Terms() { var terms termlist switch t.Type().Underlying().(type) { case *types.Interface: diff --git a/vendor/golang.org/x/tools/internal/typesinternal/element.go b/vendor/golang.org/x/tools/internal/typesinternal/element.go index 4957f0216418d..5fe4d8abcb5a7 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/element.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/element.go @@ -35,8 +35,8 @@ func ForEachElement(rtypes *typeutil.Map, msets *typeutil.MethodSetCache, T type // Recursion over signatures of each method. 
tmset := msets.MethodSet(T) - for i := 0; i < tmset.Len(); i++ { - sig := tmset.At(i).Type().(*types.Signature) + for method := range tmset.Methods() { + sig := method.Type().(*types.Signature) // It is tempting to call visit(sig, false) // but, as noted in golang.org/cl/65450043, // the Signature.Recv field is ignored by diff --git a/vendor/golang.org/x/tools/internal/typesinternal/fx.go b/vendor/golang.org/x/tools/internal/typesinternal/fx.go index 93acff21701e0..c846a53d5fe7f 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/fx.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/fx.go @@ -19,25 +19,46 @@ func NoEffects(info *types.Info, expr ast.Expr) bool { switch v := n.(type) { case nil, *ast.Ident, *ast.BasicLit, *ast.BinaryExpr, *ast.ParenExpr, *ast.SelectorExpr, *ast.IndexExpr, *ast.SliceExpr, *ast.TypeAssertExpr, - *ast.StarExpr, *ast.CompositeLit, *ast.ArrayType, *ast.StructType, - *ast.MapType, *ast.InterfaceType, *ast.KeyValueExpr: - // No effect + *ast.StarExpr, *ast.CompositeLit, + // non-expressions that may appear within expressions + *ast.KeyValueExpr, + *ast.FieldList, + *ast.Field, + *ast.Ellipsis, + *ast.IndexListExpr: + // No effect. + + case *ast.ArrayType, + *ast.StructType, + *ast.ChanType, + *ast.FuncType, + *ast.MapType, + *ast.InterfaceType: + // Type syntax: no effects, recursively. + // Prune descent. + return false + case *ast.UnaryExpr: - // Channel send <-ch has effects + // Channel send <-ch has effects. if v.Op == token.ARROW { noEffects = false } + case *ast.CallExpr: - // Type conversion has no effects + // Type conversion has no effects. if !info.Types[v.Fun].IsType() { - // TODO(adonovan): Add a case for built-in functions without side - // effects (by using callsPureBuiltin from tools/internal/refactor/inline) - - noEffects = false + if CallsPureBuiltin(info, v) { + // A call such as len(e) has no effects of its + // own, though the subexpression e might. + } else { + noEffects = false + } } + case *ast.FuncLit: // A FuncLit has no effects, but do not descend into it. return false + default: // All other expressions have effects noEffects = false @@ -47,3 +68,21 @@ func NoEffects(info *types.Info, expr ast.Expr) bool { }) return noEffects } + +// CallsPureBuiltin reports whether call is a call of a built-in +// function that is a pure computation over its operands (analogous to +// a + operator). Because it does not depend on program state, it may +// be evaluated at any point--though not necessarily at multiple +// points (consider new, make). 
+func CallsPureBuiltin(info *types.Info, call *ast.CallExpr) bool { + if id, ok := ast.Unparen(call.Fun).(*ast.Ident); ok { + if b, ok := info.ObjectOf(id).(*types.Builtin); ok { + switch b.Name() { + case "len", "cap", "complex", "imag", "real", "make", "new", "max", "min": + return true + } + // Not: append clear close copy delete panic print println recover + } + } + return false +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go b/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go index f2affec4fba92..e0d63c46c6aa7 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go @@ -48,7 +48,7 @@ func IsFunctionNamed(obj types.Object, pkgPath string, names ...string) bool { return ok && IsPackageLevel(obj) && f.Pkg().Path() == pkgPath && - f.Type().(*types.Signature).Recv() == nil && + f.Signature().Recv() == nil && slices.Contains(names, f.Name()) } @@ -60,7 +60,7 @@ func IsFunctionNamed(obj types.Object, pkgPath string, names ...string) bool { // which is important for the performance of syntax matching. func IsMethodNamed(obj types.Object, pkgPath string, typeName string, names ...string) bool { if fn, ok := obj.(*types.Func); ok { - if recv := fn.Type().(*types.Signature).Recv(); recv != nil { + if recv := fn.Signature().Recv(); recv != nil { _, T := ReceiverNamed(recv) return T != nil && IsTypeNamed(T, pkgPath, typeName) && diff --git a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go index 64f47919f02e2..4e2756fc491ba 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go @@ -19,7 +19,7 @@ import ( // TODO(adonovan): this function ignores the effect of shadowing. It // should accept a [token.Pos] and a [types.Info] and compute only the // set of imports that are not shadowed at that point, analogous to -// [analysisinternal.AddImport]. It could also compute (as a side +// [analysis.AddImport]. It could also compute (as a side // effect) the set of additional imports required to ensure that there // is an accessible import for each necessary package, making it // converge even more closely with AddImport. diff --git a/vendor/golang.org/x/tools/internal/typesinternal/varkind.go b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go index e5da0495111ba..26499cdd2e70f 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/varkind.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go @@ -2,39 +2,22 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package typesinternal +//go:build go1.25 -// TODO(adonovan): when CL 645115 lands, define the go1.25 version of -// this API that actually does something. 
+package typesinternal import "go/types" -type VarKind uint8 +type VarKind = types.VarKind const ( - _ VarKind = iota // (not meaningful) - PackageVar // a package-level variable - LocalVar // a local variable - RecvVar // a method receiver variable - ParamVar // a function parameter variable - ResultVar // a function result variable - FieldVar // a struct field + PackageVar = types.PackageVar + LocalVar = types.LocalVar + RecvVar = types.RecvVar + ParamVar = types.ParamVar + ResultVar = types.ResultVar + FieldVar = types.FieldVar ) -func (kind VarKind) String() string { - return [...]string{ - 0: "VarKind(0)", - PackageVar: "PackageVar", - LocalVar: "LocalVar", - RecvVar: "RecvVar", - ParamVar: "ParamVar", - ResultVar: "ResultVar", - FieldVar: "FieldVar", - }[kind] -} - -// GetVarKind returns an invalid VarKind. -func GetVarKind(v *types.Var) VarKind { return 0 } - -// SetVarKind has no effect. -func SetVarKind(v *types.Var, kind VarKind) {} +func GetVarKind(v *types.Var) VarKind { return v.Kind() } +func SetVarKind(v *types.Var, kind VarKind) { v.SetKind(kind) } diff --git a/vendor/golang.org/x/tools/internal/typesinternal/varkind_go124.go b/vendor/golang.org/x/tools/internal/typesinternal/varkind_go124.go new file mode 100644 index 0000000000000..17b1804b4e85e --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/varkind_go124.go @@ -0,0 +1,39 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.25 + +package typesinternal + +import "go/types" + +type VarKind uint8 + +const ( + _ VarKind = iota // (not meaningful) + PackageVar // a package-level variable + LocalVar // a local variable + RecvVar // a method receiver variable + ParamVar // a function parameter variable + ResultVar // a function result variable + FieldVar // a struct field +) + +func (kind VarKind) String() string { + return [...]string{ + 0: "VarKind(0)", + PackageVar: "PackageVar", + LocalVar: "LocalVar", + RecvVar: "RecvVar", + ParamVar: "ParamVar", + ResultVar: "ResultVar", + FieldVar: "FieldVar", + }[kind] +} + +// GetVarKind returns an invalid VarKind. +func GetVarKind(v *types.Var) VarKind { return 0 } + +// SetVarKind has no effect. 
+func SetVarKind(v *types.Var, kind VarKind) {} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go index 453bba2ad5e84..d612a7102971b 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go @@ -258,12 +258,12 @@ func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr { case *types.Signature: var params []*ast.Field - for i := 0; i < t.Params().Len(); i++ { + for v := range t.Params().Variables() { params = append(params, &ast.Field{ - Type: TypeExpr(t.Params().At(i).Type(), qual), + Type: TypeExpr(v.Type(), qual), Names: []*ast.Ident{ { - Name: t.Params().At(i).Name(), + Name: v.Name(), }, }, }) @@ -273,9 +273,9 @@ func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr { last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt} } var returns []*ast.Field - for i := 0; i < t.Results().Len(); i++ { + for v := range t.Results().Variables() { returns = append(returns, &ast.Field{ - Type: TypeExpr(t.Results().At(i).Type(), qual), + Type: TypeExpr(v.Type(), qual), }) } return &ast.FuncType{ @@ -315,8 +315,8 @@ func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr { if hasTypeArgs, ok := t.(interface{ TypeArgs() *types.TypeList }); ok { if typeArgs := hasTypeArgs.TypeArgs(); typeArgs != nil && typeArgs.Len() > 0 { var indices []ast.Expr - for i := range typeArgs.Len() { - indices = append(indices, TypeExpr(typeArgs.At(i), qual)) + for t0 := range typeArgs.Types() { + indices = append(indices, TypeExpr(t0, qual)) } expr = &ast.IndexListExpr{ X: expr, diff --git a/vendor/golang.org/x/tools/internal/versions/features.go b/vendor/golang.org/x/tools/internal/versions/features.go index b53f178616135..a5f4e3252cce2 100644 --- a/vendor/golang.org/x/tools/internal/versions/features.go +++ b/vendor/golang.org/x/tools/internal/versions/features.go @@ -7,13 +7,17 @@ package versions // This file contains predicates for working with file versions to // decide when a tool should consider a language feature enabled. -// GoVersions that features in x/tools can be gated to. +// named constants, to avoid misspelling const ( Go1_18 = "go1.18" Go1_19 = "go1.19" Go1_20 = "go1.20" Go1_21 = "go1.21" Go1_22 = "go1.22" + Go1_23 = "go1.23" + Go1_24 = "go1.24" + Go1_25 = "go1.25" + Go1_26 = "go1.26" ) // Future is an invalid unknown Go version sometime in the future. 
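The x/tools hunks above in normalize.go, element.go, and zerovalue.go all make the same mechanical change: index-based loops over go/types collections become range loops over the iterator methods added in Go 1.24 (Interface.EmbeddedTypes, Union.Terms, MethodSet.Methods, Tuple.Variables, TypeList.Types). A minimal, self-contained sketch of the new pattern, using a hypothetical package p with one function F:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p; func F(a int, b string) (ok bool, err error) { return }`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	// Type-check the file; no importer is needed since src has no imports.
	pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	sig := pkg.Scope().Lookup("F").(*types.Func).Signature()

	// Old style, as removed by the diff:
	//   for i := 0; i < sig.Params().Len(); i++ { v := sig.Params().At(i); ... }
	// New style (Go 1.24+ iterators), as used in the zerovalue.go hunk:
	for v := range sig.Params().Variables() {
		fmt.Println("param:", v.Name(), v.Type())
	}
	for v := range sig.Results().Variables() {
		fmt.Println("result:", v.Name(), v.Type())
	}
}
```

The iterator form is behavior-preserving, which is why the patch can swap it in without touching surrounding logic; it simply removes the index bookkeeping.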
diff --git a/vendor/modules.txt b/vendor/modules.txt index 61af805d88655..b12a24d359fce 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -227,8 +227,6 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric # github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.54.0 ## explicit; go 1.24.0 github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping -# github.com/HdrHistogram/hdrhistogram-go v1.1.2 -## explicit; go 1.14 # github.com/IBM/go-sdk-core/v5 v5.21.2 ## explicit; go 1.24.0 github.com/IBM/go-sdk-core/v5/core @@ -1191,8 +1189,8 @@ github.com/gorilla/websocket # github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 ## explicit; go 1.17 github.com/grafana/cloudflare-go -# github.com/grafana/dskit v0.0.0-20250917065751-798f5a8fa154 -## explicit; go 1.23.0 +# github.com/grafana/dskit v0.0.0-20251210115601-41c7cf07196b +## explicit; go 1.24.0 github.com/grafana/dskit/backoff github.com/grafana/dskit/cancellation github.com/grafana/dskit/clusterutil @@ -1237,7 +1235,7 @@ github.com/grafana/dskit/user # github.com/grafana/go-gelf/v2 v2.0.1 ## explicit; go 1.17 github.com/grafana/go-gelf/v2/gelf -# github.com/grafana/gomemcache v0.0.0-20250828162811-a96f6acee2fe +# github.com/grafana/gomemcache v0.0.0-20251127154401-74f93547077b ## explicit; go 1.21 github.com/grafana/gomemcache/memcache # github.com/grafana/jsonparser v0.0.0-20241004153430-023329977675 @@ -1249,7 +1247,7 @@ github.com/grafana/loki/pkg/push # github.com/grafana/otel-profiling-go v0.5.1 ## explicit; go 1.16 github.com/grafana/otel-profiling-go -# github.com/grafana/pyroscope-go/godeltaprof v0.1.8 +# github.com/grafana/pyroscope-go/godeltaprof v0.1.9 ## explicit; go 1.18 github.com/grafana/pyroscope-go/godeltaprof github.com/grafana/pyroscope-go/godeltaprof/http/pprof @@ -1294,9 +1292,9 @@ github.com/hashicorp/go-immutable-radix github.com/hashicorp/go-metrics github.com/hashicorp/go-metrics/compat github.com/hashicorp/go-metrics/prometheus -# github.com/hashicorp/go-msgpack v0.5.5 -## explicit -github.com/hashicorp/go-msgpack/codec +# github.com/hashicorp/go-msgpack/v2 v2.1.2 +## explicit; go 1.19 +github.com/hashicorp/go-msgpack/v2/codec # github.com/hashicorp/go-multierror v1.1.1 ## explicit; go 1.13 github.com/hashicorp/go-multierror @@ -1323,8 +1321,8 @@ github.com/hashicorp/golang-lru/simplelru github.com/hashicorp/golang-lru/v2 github.com/hashicorp/golang-lru/v2/internal github.com/hashicorp/golang-lru/v2/simplelru -# github.com/hashicorp/memberlist v0.5.3 => github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe -## explicit; go 1.12 +# github.com/hashicorp/memberlist v0.5.3 => github.com/grafana/memberlist v0.3.1-0.20251126142931-6f9f62ab6f86 +## explicit; go 1.20 github.com/hashicorp/memberlist # github.com/hashicorp/serf v0.10.2 ## explicit; go 1.19 @@ -1684,8 +1682,8 @@ github.com/pierrec/lz4/v4/internal/lz4block github.com/pierrec/lz4/v4/internal/lz4errors github.com/pierrec/lz4/v4/internal/lz4stream github.com/pierrec/lz4/v4/internal/xxh32 -# github.com/pires/go-proxyproto v0.7.0 -## explicit; go 1.18 +# github.com/pires/go-proxyproto v0.8.1 +## explicit; go 1.24 github.com/pires/go-proxyproto # github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c ## explicit; go 1.14 @@ -1900,7 +1898,7 @@ github.com/sean-/seed # github.com/segmentio/fasthash v1.0.3 ## explicit; go 1.11 github.com/segmentio/fasthash/fnv1a -# github.com/sercand/kuberesolver/v6 v6.0.0 +# github.com/sercand/kuberesolver/v6 v6.0.1 
## explicit; go 1.22.0 github.com/sercand/kuberesolver/v6 # github.com/sethvargo/go-retry v0.3.0 @@ -2085,16 +2083,17 @@ go.etcd.io/etcd/api/v3/mvccpb go.etcd.io/etcd/api/v3/v3rpc/rpctypes go.etcd.io/etcd/api/v3/version go.etcd.io/etcd/api/v3/versionpb -# go.etcd.io/etcd/client/pkg/v3 v3.5.4 -## explicit; go 1.16 +# go.etcd.io/etcd/client/pkg/v3 v3.6.6 +## explicit; go 1.24 go.etcd.io/etcd/client/pkg/v3/fileutil go.etcd.io/etcd/client/pkg/v3/logutil go.etcd.io/etcd/client/pkg/v3/systemd go.etcd.io/etcd/client/pkg/v3/tlsutil go.etcd.io/etcd/client/pkg/v3/transport go.etcd.io/etcd/client/pkg/v3/types -# go.etcd.io/etcd/client/v3 v3.5.4 -## explicit; go 1.16 +go.etcd.io/etcd/client/pkg/v3/verify +# go.etcd.io/etcd/client/v3 v3.6.6 +## explicit; go 1.24 go.etcd.io/etcd/client/v3 go.etcd.io/etcd/client/v3/credentials go.etcd.io/etcd/client/v3/internal/endpoint @@ -2165,13 +2164,13 @@ go.opentelemetry.io/collector/processor/internal # go.opentelemetry.io/collector/semconv v0.128.0 ## explicit; go 1.23.0 go.opentelemetry.io/collector/semconv/v1.6.1 -# go.opentelemetry.io/contrib/bridges/prometheus v0.61.0 +# go.opentelemetry.io/contrib/bridges/prometheus v0.63.0 ## explicit; go 1.23.0 go.opentelemetry.io/contrib/bridges/prometheus # go.opentelemetry.io/contrib/detectors/gcp v1.38.0 ## explicit; go 1.23.8 go.opentelemetry.io/contrib/detectors/gcp -# go.opentelemetry.io/contrib/exporters/autoexport v0.61.0 +# go.opentelemetry.io/contrib/exporters/autoexport v0.63.0 ## explicit; go 1.23.0 go.opentelemetry.io/contrib/exporters/autoexport # go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 @@ -2187,13 +2186,13 @@ go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/int go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv -# go.opentelemetry.io/contrib/propagators/jaeger v1.35.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/contrib/propagators/jaeger v1.38.0 +## explicit; go 1.23.0 go.opentelemetry.io/contrib/propagators/jaeger -# go.opentelemetry.io/contrib/samplers/jaegerremote v0.30.0 +# go.opentelemetry.io/contrib/samplers/jaegerremote v0.32.0 ## explicit; go 1.23.0 go.opentelemetry.io/contrib/samplers/jaegerremote -go.opentelemetry.io/contrib/samplers/jaegerremote/internal/utils +go.opentelemetry.io/contrib/samplers/jaegerremote/internal/ratelimiter # go.opentelemetry.io/otel v1.38.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel @@ -2205,7 +2204,6 @@ go.opentelemetry.io/otel/internal/baggage go.opentelemetry.io/otel/internal/global go.opentelemetry.io/otel/propagation go.opentelemetry.io/otel/semconv/v1.18.0 -go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.21.0 go.opentelemetry.io/otel/semconv/v1.26.0 go.opentelemetry.io/otel/semconv/v1.37.0 @@ -2219,17 +2217,17 @@ go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift -# go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2 +# go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry 
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform -# go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2 +# go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.14.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform -# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0 +# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal @@ -2237,7 +2235,7 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envco go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform -# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0 +# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal @@ -2263,18 +2261,20 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry -# go.opentelemetry.io/otel/exporters/prometheus v0.58.0 +# go.opentelemetry.io/otel/exporters/prometheus v0.60.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/prometheus -# go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2 +# go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/stdout/stdoutlog # go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/stdout/stdoutmetric -# go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0 +# go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/stdout/stdouttrace +go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/counter +go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x # go.opentelemetry.io/otel/log v0.14.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/log @@ -2295,9 +2295,10 @@ go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace go.opentelemetry.io/otel/sdk/trace/internal/x go.opentelemetry.io/otel/sdk/trace/tracetest -# go.opentelemetry.io/otel/sdk/log v0.12.2 +# go.opentelemetry.io/otel/sdk/log v0.14.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/sdk/log +go.opentelemetry.io/otel/sdk/log/internal/x # go.opentelemetry.io/otel/sdk/metric v1.38.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/sdk/metric @@ -2372,11 +2373,11 @@ golang.org/x/crypto/pkcs12 golang.org/x/crypto/pkcs12/internal/rc2 golang.org/x/crypto/scrypt golang.org/x/crypto/sha3 -# golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 +# golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39 ## explicit; go 1.24.0 golang.org/x/exp/constraints 
golang.org/x/exp/slices -# golang.org/x/mod v0.29.0 +# golang.org/x/mod v0.30.0 ## explicit; go 1.24.0 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/module @@ -2428,7 +2429,7 @@ golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry golang.org/x/sys/windows/svc/eventlog -# golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 +# golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 ## explicit; go 1.24.0 golang.org/x/telemetry/counter golang.org/x/telemetry/internal/counter @@ -2464,7 +2465,7 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.14.0 ## explicit; go 1.24.0 golang.org/x/time/rate -# golang.org/x/tools v0.38.0 +# golang.org/x/tools v0.39.0 ## explicit; go 1.24.0 golang.org/x/tools/cmd/goimports golang.org/x/tools/cmd/stringer @@ -3123,7 +3124,7 @@ zombiezen.com/go/sqlite/sqlitex # github.com/Azure/azure-sdk-for-go => github.com/Azure/azure-sdk-for-go v68.0.0+incompatible # github.com/Azure/azure-storage-blob-go => github.com/MasslessParticle/azure-storage-blob-go v0.14.1-0.20240322194317-344980fda573 # github.com/gocql/gocql => github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 -# github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe +# github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20251126142931-6f9f62ab6f86 # github.com/grafana/regexp => github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc # github.com/grafana/loki/pkg/push => ./pkg/push # github.com/influxdata/go-syslog/v3 => github.com/leodido/go-syslog/v4 v4.3.0
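Finally, to make the new bootstrap API from the stdlib hunks above concrete: IsBootstrapPackage is just a lookup in the generated bootstrap set, and BootstrapVersion caps the language features those packages may use. A self-contained sketch of that mechanism under illustrative names (the Version type here is a placeholder, and the two sample entries are drawn from the generated set shown above, not the full data):

```go
package main

import "fmt"

// Version stands in for the vendored stdlib package's minor-version type.
type Version int

// BootstrapVersion caps the language version for bootstrap packages,
// matching the go1.24.6 bootstrap toolchain noted in the patch.
const BootstrapVersion = Version(24)

// bootstrap holds the package paths that must build with the bootstrap
// toolchain; the real set is generated from cmd/dist.
var bootstrap = map[string]bool{
	"cmd/compile/internal/ssa": true,
	"internal/pkgbits":         true,
}

// IsBootstrapPackage reports whether pkg is subject to the cap.
func IsBootstrapPackage(pkg string) bool { return bootstrap[pkg] }

func main() {
	for _, pkg := range []string{"cmd/compile/internal/ssa", "net/http"} {
		if IsBootstrapPackage(pkg) {
			fmt.Printf("%s: limited to go1.%d language features\n", pkg, BootstrapVersion)
		} else {
			fmt.Printf("%s: no bootstrap restriction\n", pkg)
		}
	}
}
```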